
Merge branch 'master' into WPH95-feature/add_es_alerting

Marcus Efraimsson, 7 years ago
Parent commit: d3f516f1c4
44 changed files with 418 additions and 309 deletions
  1. CHANGELOG.md (+5 -0)
  2. docker/blocks/mssql/build/setup.sql.template (+13 -1)
  3. docker/blocks/mssql/docker-compose.yaml (+1 -1)
  4. docs/sources/http_api/dashboard_permissions.md (+1 -0)
  5. docs/sources/http_api/org.md (+2 -0)
  6. docs/sources/installation/debian.md (+3 -3)
  7. docs/sources/installation/rpm.md (+5 -5)
  8. docs/sources/installation/windows.md (+1 -1)
  9. docs/sources/reference/dashboard.md (+2 -0)
  10. package.json (+3 -0)
  11. pkg/cmd/grafana-server/server.go (+1 -7)
  12. pkg/login/ldap.go (+1 -1)
  13. pkg/services/sqlstore/alert.go (+1 -1)
  14. pkg/services/sqlstore/alert_notification_test.go (+0 -2)
  15. pkg/services/sqlstore/annotation.go (+5 -5)
  16. pkg/services/sqlstore/annotation_test.go (+16 -3)
  17. pkg/services/sqlstore/dashboard_snapshot_test.go (+29 -30)
  18. pkg/services/sqlstore/migrations/annotation_mig.go (+2 -8)
  19. pkg/services/sqlstore/migrations/dashboard_acl.go (+1 -4)
  20. pkg/services/sqlstore/migrations/dashboard_mig.go (+2 -4)
  21. pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go (+1 -3)
  22. pkg/services/sqlstore/migrations/dashboard_version_mig.go (+3 -11)
  23. pkg/services/sqlstore/migrations/datasource_mig.go (+1 -4)
  24. pkg/services/sqlstore/migrations/migrations_test.go (+1 -1)
  25. pkg/services/sqlstore/migrations/org_mig.go (+1 -4)
  26. pkg/services/sqlstore/migrations/user_auth_mig.go (+1 -2)
  27. pkg/services/sqlstore/migrator/column.go (+2 -41)
  28. pkg/services/sqlstore/migrator/dialect.go (+103 -11)
  29. pkg/services/sqlstore/migrator/migrations.go (+38 -17)
  30. pkg/services/sqlstore/migrator/migrator.go (+1 -1)
  31. pkg/services/sqlstore/migrator/mysql_dialect.go (+25 -5)
  32. pkg/services/sqlstore/migrator/postgres_dialect.go (+20 -6)
  33. pkg/services/sqlstore/migrator/sqlite_dialect.go (+11 -6)
  34. pkg/services/sqlstore/migrator/types.go (+1 -0)
  35. pkg/services/sqlstore/playlist.go (+1 -1)
  36. pkg/services/sqlstore/search_builder.go (+3 -5)
  37. pkg/services/sqlstore/search_builder_test.go (+2 -5)
  38. pkg/services/sqlstore/shared.go (+21 -0)
  39. pkg/services/sqlstore/sqlstore.go (+19 -21)
  40. pkg/services/sqlstore/sqlutil/sqlutil.go (+0 -37)
  41. pkg/services/sqlstore/team.go (+1 -2)
  42. pkg/services/sqlstore/user.go (+8 -2)
  43. pkg/tracing/tracing.go (+55 -43)
  44. yarn.lock (+5 -5)

+ 5 - 0
CHANGELOG.md

@@ -13,6 +13,11 @@
 * **Security**: Fix XSS vulnerabilities in dashboard links [#11813](https://github.com/grafana/grafana/pull/11813)
 * **Singlestat**: Fix "time of last point" shows local time when dashboard timezone set to UTC [#10338](https://github.com/grafana/grafana/issues/10338)
 
+# 5.1.2 (2018-05-09)
+
+* **Database**: Fix MySql migration issue [#11862](https://github.com/grafana/grafana/issues/11862)
+* **Google Analytics**: Enable Google Analytics anonymizeIP setting for GDPR [#11656](https://github.com/grafana/grafana/pull/11656)
+
 # 5.1.1 (2018-05-07)
 
 * **LDAP**: LDAP login with MariaDB/MySQL database and dn>100 chars not possible [#11754](https://github.com/grafana/grafana/issues/11754)

+ 13 - 1
docker/blocks/mssql/build/setup.sql.template

@@ -1,7 +1,19 @@
 CREATE LOGIN %%USER%% WITH PASSWORD = '%%PWD%%'
 GO
 
-CREATE DATABASE %%DB%%;
+CREATE DATABASE %%DB%%
+ON
+( NAME = %%DB%%,
+    FILENAME = '/var/opt/mssql/data/%%DB%%.mdf',
+    SIZE = 500MB,
+    MAXSIZE = 1000MB,
+    FILEGROWTH = 100MB )
+LOG ON
+( NAME = %%DB%%_log,
+    FILENAME = '/var/opt/mssql/data/%%DB%%_log.ldf',
+    SIZE = 500MB,
+    MAXSIZE = 1000MB,
+    FILEGROWTH = 100MB );
 GO
 
 USE %%DB%%;

+ 1 - 1
docker/blocks/mssql/docker-compose.yaml

@@ -4,7 +4,7 @@
     environment:
       ACCEPT_EULA: Y
       MSSQL_SA_PASSWORD: Password!
-      MSSQL_PID: Express
+      MSSQL_PID: Developer
       MSSQL_DATABASE: grafana
       MSSQL_USER: grafana
       MSSQL_PASSWORD: Password!

+ 1 - 0
docs/sources/http_api/dashboard_permissions.md

@@ -106,6 +106,7 @@ Accept: application/json
 Content-Type: application/json
 Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
 
+{
   "items": [
     {
       "role": "Viewer",

+ 2 - 0
docs/sources/http_api/org.md

@@ -380,6 +380,8 @@ Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
   "role":"Viewer"
 }
 ```
+Note: The api will only work when you pass the admin name and password
+to the request http url, like http://admin:admin@localhost:3000/api/orgs/1/users
 
 **Example Response**:
 

+ 3 - 3
docs/sources/installation/debian.md

@@ -15,7 +15,7 @@ weight = 1
 
 Description | Download
 ------------ | -------------
-Stable for Debian-based Linux | [grafana_5.1.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.1_amd64.deb)
+Stable for Debian-based Linux | [grafana_5.1.2_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.2_amd64.deb)
 <!--
 Beta for Debian-based Linux | [grafana_5.1.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.0-beta1_amd64.deb)
 -->
@@ -27,9 +27,9 @@ installation.
 
 
 ```bash
-wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.1_amd64.deb
+wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.2_amd64.deb
 sudo apt-get install -y adduser libfontconfig
-sudo dpkg -i grafana_5.1.1_amd64.deb
+sudo dpkg -i grafana_5.1.2_amd64.deb
 ```
 
 <!-- ## Install Latest Beta

+ 5 - 5
docs/sources/installation/rpm.md

@@ -15,7 +15,7 @@ weight = 2
 
 Description | Download
 ------------ | -------------
-Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm)
+Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.2 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm)
 <!--
 Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [5.1.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.0-beta1.x86_64.rpm)
 -->
@@ -28,7 +28,7 @@ installation.
 You can install Grafana using Yum directly.
 
 ```bash
-$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm
+$ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm
 ```
 
 <!-- ## Install Beta
@@ -42,15 +42,15 @@ Or install manually using `rpm`.
 #### On CentOS / Fedora / Redhat:
 
 ```bash
-$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1-1.x86_64.rpm
+$ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2-1.x86_64.rpm
 $ sudo yum install initscripts fontconfig
-$ sudo rpm -Uvh grafana-5.1.1-1.x86_64.rpm
+$ sudo rpm -Uvh grafana-5.1.2-1.x86_64.rpm
 ```
 
 #### On OpenSuse:
 
 ```bash
-$ sudo rpm -i --nodeps grafana-5.1.1-1.x86_64.rpm
+$ sudo rpm -i --nodeps grafana-5.1.2-1.x86_64.rpm
 ```
 
 ## Install via YUM Repository

+ 1 - 1
docs/sources/installation/windows.md

@@ -12,7 +12,7 @@ weight = 3
 
 Description | Download
 ------------ | -------------
-Latest stable package for Windows | [grafana-5.1.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.1.windows-x64.zip)
+Latest stable package for Windows | [grafana-5.1.2.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.1.2.windows-x64.zip)
 
 <!--
 Latest beta package for Windows | [grafana.5.1.0-beta1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-5.0.0-beta5.windows-x64.zip)

+ 2 - 0
docs/sources/reference/dashboard.md

@@ -50,6 +50,7 @@ When a user creates a new dashboard, a new dashboard JSON object is initialized
   "annotations": {
     "list": []
   },
+  "refresh": "5s",
   "schemaVersion": 16,
   "version": 0,
   "links": []
@@ -71,6 +72,7 @@ Each field in the dashboard JSON is explained below with its usage:
 | **timepicker** | timepicker metadata, see [timepicker section](#timepicker) for details |
 | **templating** | templating metadata, see [templating section](#templating) for details |
 | **annotations** | annotations metadata, see [annotations section](#annotations) for details |
+| **refresh** | auto-refresh interval
 | **schemaVersion** | version of the JSON schema (integer), incremented each time a Grafana update brings changes to said schema |
 | **version** | version of the dashboard (integer), incremented each time the dashboard is updated |
 | **panels** | panels array, see below for detail. |

+ 3 - 0
package.json

@@ -179,5 +179,8 @@
     "tether": "^1.4.0",
     "tether-drop": "https://github.com/torkelo/drop/tarball/master",
     "tinycolor2": "^1.4.1"
+  },
+  "resolutions": {
+    "caniuse-db": "1.0.30000772"
   }
 }

+ 1 - 7
pkg/cmd/grafana-server/server.go

@@ -27,7 +27,6 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 
 	"github.com/grafana/grafana/pkg/social"
-	"github.com/grafana/grafana/pkg/tracing"
 
 	// self registering services
 	_ "github.com/grafana/grafana/pkg/extensions"
@@ -38,6 +37,7 @@ import (
 	_ "github.com/grafana/grafana/pkg/services/notifications"
 	_ "github.com/grafana/grafana/pkg/services/provisioning"
 	_ "github.com/grafana/grafana/pkg/services/search"
+	_ "github.com/grafana/grafana/pkg/tracing"
 )
 
 func NewGrafanaServer() *GrafanaServerImpl {
@@ -77,12 +77,6 @@ func (g *GrafanaServerImpl) Run() error {
 	login.Init()
 	social.NewOAuthService()
 
-	tracingCloser, err := tracing.Init(g.cfg.Raw)
-	if err != nil {
-		return fmt.Errorf("Tracing settings is not valid. error: %v", err)
-	}
-	defer tracingCloser.Close()
-
 	serviceGraph := inject.Graph{}
 	serviceGraph.Provide(&inject.Object{Value: bus.GetBus()})
 	serviceGraph.Provide(&inject.Object{Value: g.cfg})

+ 1 - 1
pkg/login/ldap.go

@@ -349,7 +349,7 @@ func (a *ldapAuther) searchForUser(username string) (*LdapUserInfo, error) {
 
 func getLdapAttrN(name string, result *ldap.SearchResult, n int) string {
 	if name == "DN" {
-		return result.Entries[0].DN
+		return result.Entries[n].DN
 	}
 	for _, attr := range result.Entries[n].Attributes {
 		if attr.Name == name {

+ 1 - 1
pkg/services/sqlstore/alert.go

@@ -114,7 +114,7 @@ func HandleAlertsQuery(query *m.GetAlertsQuery) error {
 	builder.Write(" ORDER BY name ASC")
 
 	if query.Limit != 0 {
-		builder.Write(" LIMIT ?", query.Limit)
+		builder.Write(dialect.Limit(query.Limit))
 	}
 
 	alerts := make([]*m.AlertListItemDTO, 0)

+ 0 - 2
pkg/services/sqlstore/alert_notification_test.go

@@ -1,7 +1,6 @@
 package sqlstore
 
 import (
-	"fmt"
 	"testing"
 
 	"github.com/grafana/grafana/pkg/components/simplejson"
@@ -21,7 +20,6 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			}
 
 			err := GetAlertNotifications(cmd)
-			fmt.Printf("error %v", err)
 			So(err, ShouldBeNil)
 			So(cmd.Result, ShouldBeNil)
 		})

+ 5 - 5
pkg/services/sqlstore/annotation.go

@@ -50,7 +50,7 @@ func (r *SqlAnnotationRepo) ensureTagsExist(sess *DBSession, tags []*models.Tag)
 		var existingTag models.Tag
 
 		// check if it exists
-		if exists, err := sess.Table("tag").Where("`key`=? AND `value`=?", tag.Key, tag.Value).Get(&existingTag); err != nil {
+		if exists, err := sess.Table("tag").Where(dialect.Quote("key")+"=? AND "+dialect.Quote("value")+"=?", tag.Key, tag.Value).Get(&existingTag); err != nil {
 			return nil, err
 		} else if exists {
 			tag.Id = existingTag.Id
@@ -146,7 +146,7 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
 	params = append(params, query.OrgId)
 
 	if query.AnnotationId != 0 {
-		fmt.Print("annotation query")
+		// fmt.Print("annotation query")
 		sql.WriteString(` AND annotation.id = ?`)
 		params = append(params, query.AnnotationId)
 	}
@@ -193,10 +193,10 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
 		tags := models.ParseTagPairs(query.Tags)
 		for _, tag := range tags {
 			if tag.Value == "" {
-				keyValueFilters = append(keyValueFilters, "(tag.key = ?)")
+				keyValueFilters = append(keyValueFilters, "(tag."+dialect.Quote("key")+" = ?)")
 				params = append(params, tag.Key)
 			} else {
-				keyValueFilters = append(keyValueFilters, "(tag.key = ? AND tag.value = ?)")
+				keyValueFilters = append(keyValueFilters, "(tag."+dialect.Quote("key")+" = ? AND tag."+dialect.Quote("value")+" = ?)")
 				params = append(params, tag.Key, tag.Value)
 			}
 		}
@@ -219,7 +219,7 @@ func (r *SqlAnnotationRepo) Find(query *annotations.ItemQuery) ([]*annotations.I
 		query.Limit = 100
 	}
 
-	sql.WriteString(fmt.Sprintf(" ORDER BY epoch DESC LIMIT %v", query.Limit))
+	sql.WriteString(" ORDER BY epoch DESC" + dialect.Limit(query.Limit))
 
 	items := make([]*annotations.ItemDTO, 0)
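
The quoting change above exists because `key` and `value` are reserved words on some of the supported databases. A minimal sketch of the idea, assuming a `migrator.Dialect` value such as the package-level `dialect` used in this file; the helper name is hypothetical:

```go
package example

import "github.com/grafana/grafana/pkg/services/sqlstore/migrator"

// tagFilter builds the tag WHERE fragment with identifiers quoted by the
// active dialect: backticks on MySQL/SQLite, double quotes on Postgres, so
// the reserved words "key" and "value" stay valid on every database.
func tagFilter(d migrator.Dialect) string {
	return "(tag." + d.Quote("key") + " = ? AND tag." + d.Quote("value") + " = ?)"
}
```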
 

+ 16 - 3
pkg/services/sqlstore/annotation_test.go

@@ -10,12 +10,18 @@ import (
 )
 
 func TestSavingTags(t *testing.T) {
+	InitTestDB(t)
+
 	Convey("Testing annotation saving/loading", t, func() {
-		InitTestDB(t)
 
 		repo := SqlAnnotationRepo{}
 
 		Convey("Can save tags", func() {
+			Reset(func() {
+				_, err := x.Exec("DELETE FROM annotation_tag WHERE 1=1")
+				So(err, ShouldBeNil)
+			})
+
 			tagPairs := []*models.Tag{
 				{Key: "outage"},
 				{Key: "type", Value: "outage"},
@@ -31,12 +37,19 @@ func TestSavingTags(t *testing.T) {
 }
 
 func TestAnnotations(t *testing.T) {
-	Convey("Testing annotation saving/loading", t, func() {
-		InitTestDB(t)
+	InitTestDB(t)
 
+	Convey("Testing annotation saving/loading", t, func() {
 		repo := SqlAnnotationRepo{}
 
 		Convey("Can save annotation", func() {
+			Reset(func() {
+				_, err := x.Exec("DELETE FROM annotation WHERE 1=1")
+				So(err, ShouldBeNil)
+				_, err = x.Exec("DELETE FROM annotation_tag WHERE 1=1")
+				So(err, ShouldBeNil)
+			})
+
 			annotation := &annotations.Item{
 				OrgId:       1,
 				UserId:      1,

+ 29 - 30
pkg/services/sqlstore/dashboard_snapshot_test.go

@@ -110,42 +110,39 @@ func TestDashboardSnapshotDBAccess(t *testing.T) {
 }
 
 func TestDeleteExpiredSnapshots(t *testing.T) {
-	Convey("Testing dashboard snapshots clean up", t, func() {
-		x := InitTestDB(t)
+	x := InitTestDB(t)
 
+	Convey("Testing dashboard snapshots clean up", t, func() {
 		setting.SnapShotRemoveExpired = true
 
-		notExpiredsnapshot := createTestSnapshot(x, "key1", 1000)
-		createTestSnapshot(x, "key2", -1000)
-		createTestSnapshot(x, "key3", -1000)
+		notExpiredsnapshot := createTestSnapshot(x, "key1", 1200)
+		createTestSnapshot(x, "key2", -1200)
+		createTestSnapshot(x, "key3", -1200)
 
-		Convey("Clean up old dashboard snapshots", func() {
-			err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
-			So(err, ShouldBeNil)
+		err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
+		So(err, ShouldBeNil)
 
-			query := m.GetDashboardSnapshotsQuery{
-				OrgId:        1,
-				SignedInUser: &m.SignedInUser{OrgRole: m.ROLE_ADMIN},
-			}
-			err = SearchDashboardSnapshots(&query)
-			So(err, ShouldBeNil)
+		query := m.GetDashboardSnapshotsQuery{
+			OrgId:        1,
+			SignedInUser: &m.SignedInUser{OrgRole: m.ROLE_ADMIN},
+		}
+		err = SearchDashboardSnapshots(&query)
+		So(err, ShouldBeNil)
 
-			So(len(query.Result), ShouldEqual, 1)
-			So(query.Result[0].Key, ShouldEqual, notExpiredsnapshot.Key)
-		})
+		So(len(query.Result), ShouldEqual, 1)
+		So(query.Result[0].Key, ShouldEqual, notExpiredsnapshot.Key)
 
-		Convey("Don't delete anything if there are no expired snapshots", func() {
-			err := DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
-			So(err, ShouldBeNil)
+		err = DeleteExpiredSnapshots(&m.DeleteExpiredSnapshotsCommand{})
+		So(err, ShouldBeNil)
 
-			query := m.GetDashboardSnapshotsQuery{
-				OrgId:        1,
-				SignedInUser: &m.SignedInUser{OrgRole: m.ROLE_ADMIN},
-			}
-			SearchDashboardSnapshots(&query)
+		query = m.GetDashboardSnapshotsQuery{
+			OrgId:        1,
+			SignedInUser: &m.SignedInUser{OrgRole: m.ROLE_ADMIN},
+		}
+		SearchDashboardSnapshots(&query)
 
-			So(len(query.Result), ShouldEqual, 1)
-		})
+		So(len(query.Result), ShouldEqual, 1)
+		So(query.Result[0].Key, ShouldEqual, notExpiredsnapshot.Key)
 	})
 }
 
@@ -164,9 +161,11 @@ func createTestSnapshot(x *xorm.Engine, key string, expires int64) *m.DashboardS
 	So(err, ShouldBeNil)
 
 	// Set expiry date manually - to be able to create expired snapshots
-	expireDate := time.Now().Add(time.Second * time.Duration(expires))
-	_, err = x.Exec("update dashboard_snapshot set expires = ? where "+dialect.Quote("key")+" = ?", expireDate, key)
-	So(err, ShouldBeNil)
+	if expires < 0 {
+		expireDate := time.Now().Add(time.Second * time.Duration(expires))
+		_, err = x.Exec("UPDATE dashboard_snapshot SET expires = ? WHERE id = ?", expireDate, cmd.Result.Id)
+		So(err, ShouldBeNil)
+	}
 
 	return cmd.Result
 }

+ 2 - 8
pkg/services/sqlstore/migrations/annotation_mig.go

@@ -86,10 +86,7 @@ func addAnnotationMig(mg *Migrator) {
 	// clear alert text
 	//
 	updateTextFieldSql := "UPDATE annotation SET TEXT = '' WHERE alert_id > 0"
-	mg.AddMigration("Update alert annotations and set TEXT to empty", new(RawSqlMigration).
-		Sqlite(updateTextFieldSql).
-		Postgres(updateTextFieldSql).
-		Mysql(updateTextFieldSql))
+	mg.AddMigration("Update alert annotations and set TEXT to empty", NewRawSqlMigration(updateTextFieldSql))
 
 	//
 	// Add a 'created' & 'updated' column
@@ -111,8 +108,5 @@ func addAnnotationMig(mg *Migrator) {
 	// Convert epoch saved as seconds to miliseconds
 	//
 	updateEpochSql := "UPDATE annotation SET epoch = (epoch*1000) where epoch < 9999999999"
-	mg.AddMigration("Convert existing annotations from seconds to milliseconds", new(RawSqlMigration).
-		Sqlite(updateEpochSql).
-		Postgres(updateEpochSql).
-		Mysql(updateEpochSql))
+	mg.AddMigration("Convert existing annotations from seconds to milliseconds", NewRawSqlMigration(updateEpochSql))
 }

+ 1 - 4
pkg/services/sqlstore/migrations/dashboard_acl.go

@@ -45,8 +45,5 @@ INSERT INTO dashboard_acl
 		(-1,-1, 2,'Editor','2017-06-20','2017-06-20')
 	`
 
-	mg.AddMigration("save default acl rules in dashboard_acl table", new(RawSqlMigration).
-		Sqlite(rawSQL).
-		Postgres(rawSQL).
-		Mysql(rawSQL))
+	mg.AddMigration("save default acl rules in dashboard_acl table", NewRawSqlMigration(rawSQL))
 }

+ 2 - 4
pkg/services/sqlstore/migrations/dashboard_mig.go

@@ -90,9 +90,7 @@ func addDashboardMigration(mg *Migrator) {
 	mg.AddMigration("drop table dashboard_v1", NewDropTableMigration("dashboard_v1"))
 
 	// change column type of dashboard.data
-	mg.AddMigration("alter dashboard.data to mediumtext v1", new(RawSqlMigration).
-		Sqlite("SELECT 0 WHERE 0;").
-		Postgres("SELECT 0;").
+	mg.AddMigration("alter dashboard.data to mediumtext v1", NewRawSqlMigration("").
 		Mysql("ALTER TABLE dashboard MODIFY data MEDIUMTEXT;"))
 
 	// add column to store updater of a dashboard
@@ -157,7 +155,7 @@ func addDashboardMigration(mg *Migrator) {
 		Name: "uid", Type: DB_NVarchar, Length: 40, Nullable: true,
 	}))
 
-	mg.AddMigration("Update uid column values in dashboard", new(RawSqlMigration).
+	mg.AddMigration("Update uid column values in dashboard", NewRawSqlMigration("").
 		Sqlite("UPDATE dashboard SET uid=printf('%09d',id) WHERE uid IS NULL;").
 		Postgres("UPDATE dashboard SET uid=lpad('' || id,9,'0') WHERE uid IS NULL;").
 		Mysql("UPDATE dashboard SET uid=lpad(id,9,'0') WHERE uid IS NULL;"))

+ 1 - 3
pkg/services/sqlstore/migrations/dashboard_snapshot_mig.go

@@ -50,9 +50,7 @@ func addDashboardSnapshotMigrations(mg *Migrator) {
 	addTableIndicesMigrations(mg, "v5", snapshotV5)
 
 	// change column type of dashboard
-	mg.AddMigration("alter dashboard_snapshot to mediumtext v2", new(RawSqlMigration).
-		Sqlite("SELECT 0 WHERE 0;").
-		Postgres("SELECT 0;").
+	mg.AddMigration("alter dashboard_snapshot to mediumtext v2", NewRawSqlMigration("").
 		Mysql("ALTER TABLE dashboard_snapshot MODIFY dashboard MEDIUMTEXT;"))
 
 	mg.AddMigration("Update dashboard_snapshot table charset", NewTableCharsetMigration("dashboard_snapshot", []*Column{

+ 3 - 11
pkg/services/sqlstore/migrations/dashboard_version_mig.go

@@ -28,10 +28,7 @@ func addDashboardVersionMigration(mg *Migrator) {
 
 	// before new dashboards where created with version 0, now they are always inserted with version 1
 	const setVersionTo1WhereZeroSQL = `UPDATE dashboard SET version = 1 WHERE version = 0`
-	mg.AddMigration("Set dashboard version to 1 where 0", new(RawSqlMigration).
-		Sqlite(setVersionTo1WhereZeroSQL).
-		Postgres(setVersionTo1WhereZeroSQL).
-		Mysql(setVersionTo1WhereZeroSQL))
+	mg.AddMigration("Set dashboard version to 1 where 0", NewRawSqlMigration(setVersionTo1WhereZeroSQL))
 
 	const rawSQL = `INSERT INTO dashboard_version
 (
@@ -54,14 +51,9 @@ SELECT
 	'',
 	dashboard.data
 FROM dashboard;`
-	mg.AddMigration("save existing dashboard data in dashboard_version table v1", new(RawSqlMigration).
-		Sqlite(rawSQL).
-		Postgres(rawSQL).
-		Mysql(rawSQL))
+	mg.AddMigration("save existing dashboard data in dashboard_version table v1", NewRawSqlMigration(rawSQL))
 
 	// change column type of dashboard_version.data
-	mg.AddMigration("alter dashboard_version.data to mediumtext v1", new(RawSqlMigration).
-		Sqlite("SELECT 0 WHERE 0;").
-		Postgres("SELECT 0;").
+	mg.AddMigration("alter dashboard_version.data to mediumtext v1", NewRawSqlMigration("").
 		Mysql("ALTER TABLE dashboard_version MODIFY data MEDIUMTEXT;"))
 }

+ 1 - 4
pkg/services/sqlstore/migrations/datasource_mig.go

@@ -122,10 +122,7 @@ func addDataSourceMigration(mg *Migrator) {
 	}))
 
 	const setVersionToOneWhereZero = `UPDATE data_source SET version = 1 WHERE version = 0`
-	mg.AddMigration("Update initial version to 1", new(RawSqlMigration).
-		Sqlite(setVersionToOneWhereZero).
-		Postgres(setVersionToOneWhereZero).
-		Mysql(setVersionToOneWhereZero))
+	mg.AddMigration("Update initial version to 1", NewRawSqlMigration(setVersionToOneWhereZero))
 
 	mg.AddMigration("Add read_only data column", NewAddColumnMigration(tableV2, &Column{
 		Name: "read_only", Type: DB_Bool, Nullable: true,

+ 1 - 1
pkg/services/sqlstore/migrations/migrations_test.go

@@ -25,7 +25,7 @@ func TestMigrations(t *testing.T) {
 			x, err := xorm.NewEngine(testDB.DriverName, testDB.ConnStr)
 			So(err, ShouldBeNil)
 
-			sqlutil.CleanDB(x)
+			NewDialect(x).CleanDB()
 
 			_, err = x.SQL(sql).Get(&r)
 			So(err, ShouldNotBeNil)

+ 1 - 4
pkg/services/sqlstore/migrations/org_mig.go

@@ -85,8 +85,5 @@ func addOrgMigrations(mg *Migrator) {
 	}))
 
 	const migrateReadOnlyViewersToViewers = `UPDATE org_user SET role = 'Viewer' WHERE role = 'Read Only Editor'`
-	mg.AddMigration("Migrate all Read Only Viewers to Viewers", new(RawSqlMigration).
-		Sqlite(migrateReadOnlyViewersToViewers).
-		Postgres(migrateReadOnlyViewersToViewers).
-		Mysql(migrateReadOnlyViewersToViewers))
+	mg.AddMigration("Migrate all Read Only Viewers to Viewers", NewRawSqlMigration(migrateReadOnlyViewersToViewers))
 }

+ 1 - 2
pkg/services/sqlstore/migrations/user_auth_mig.go

@@ -22,8 +22,7 @@ func addUserAuthMigrations(mg *Migrator) {
 	// add indices
 	addTableIndicesMigrations(mg, "v1", userAuthV1)
 
-	mg.AddMigration("alter user_auth.auth_id to length 190", new(RawSqlMigration).
-		Sqlite("SELECT 0 WHERE 0;").
+	mg.AddMigration("alter user_auth.auth_id to length 190", NewRawSqlMigration("").
 		Postgres("ALTER TABLE user_auth ALTER COLUMN auth_id TYPE VARCHAR(190);").
 		Mysql("ALTER TABLE user_auth MODIFY auth_id VARCHAR(190);"))
 }

+ 2 - 41
pkg/services/sqlstore/migrator/column.go

@@ -15,48 +15,9 @@ type Column struct {
 }
 
 func (col *Column) String(d Dialect) string {
-	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "
-
-	sql += d.SqlType(col) + " "
-
-	if col.IsPrimaryKey {
-		sql += "PRIMARY KEY "
-		if col.IsAutoIncrement {
-			sql += d.AutoIncrStr() + " "
-		}
-	}
-
-	if d.ShowCreateNull() {
-		if col.Nullable {
-			sql += "NULL "
-		} else {
-			sql += "NOT NULL "
-		}
-	}
-
-	if col.Default != "" {
-		sql += "DEFAULT " + col.Default + " "
-	}
-
-	return sql
+	return d.ColString(col)
 }
 
 func (col *Column) StringNoPk(d Dialect) string {
-	sql := d.QuoteStr() + col.Name + d.QuoteStr() + " "
-
-	sql += d.SqlType(col) + " "
-
-	if d.ShowCreateNull() {
-		if col.Nullable {
-			sql += "NULL "
-		} else {
-			sql += "NOT NULL "
-		}
-	}
-
-	if col.Default != "" {
-		sql += "DEFAULT " + d.Default(col) + " "
-	}
-
-	return sql
+	return d.ColStringNoPk(col)
 }

+ 103 - 11
pkg/services/sqlstore/migrator/dialect.go

@@ -3,11 +3,12 @@ package migrator
 import (
 	"fmt"
 	"strings"
+
+	"github.com/go-xorm/xorm"
 )
 
 type Dialect interface {
 	DriverName() string
-	QuoteStr() string
 	Quote(string) string
 	AndStr() string
 	AutoIncrStr() string
@@ -31,16 +32,29 @@ type Dialect interface {
 	TableCheckSql(tableName string) (string, []interface{})
 	RenameTable(oldName string, newName string) string
 	UpdateTableSql(tableName string, columns []*Column) string
+
+	ColString(*Column) string
+	ColStringNoPk(*Column) string
+
+	Limit(limit int64) string
+	LimitOffset(limit int64, offset int64) string
+
+	PreInsertId(table string, sess *xorm.Session) error
+	PostInsertId(table string, sess *xorm.Session) error
+
+	CleanDB() error
+	NoOpSql() string
 }
 
-func NewDialect(name string) Dialect {
+func NewDialect(engine *xorm.Engine) Dialect {
+	name := engine.DriverName()
 	switch name {
 	case MYSQL:
-		return NewMysqlDialect()
+		return NewMysqlDialect(engine)
 	case SQLITE:
-		return NewSqlite3Dialect()
+		return NewSqlite3Dialect(engine)
 	case POSTGRES:
-		return NewPostgresDialect()
+		return NewPostgresDialect(engine)
 	}
 
 	panic("Unsupported database type: " + name)
@@ -48,6 +62,7 @@ func NewDialect(name string) Dialect {
 
 type BaseDialect struct {
 	dialect    Dialect
+	engine     *xorm.Engine
 	driverName string
 }
 
@@ -100,9 +115,12 @@ func (b *BaseDialect) CreateTableSql(table *Table) string {
 	}
 
 	if len(pkList) > 1 {
-		sql += "PRIMARY KEY ( "
-		sql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(",")))
-		sql += " ), "
+		quotedCols := []string{}
+		for _, col := range pkList {
+			quotedCols = append(quotedCols, b.dialect.Quote(col))
+		}
+
+		sql += "PRIMARY KEY ( " + strings.Join(quotedCols, ",") + " ), "
 	}
 
 	sql = sql[:len(sql)-2] + ")"
@@ -127,9 +145,12 @@ func (db *BaseDialect) CreateIndexSql(tableName string, index *Index) string {
 
 	idxName := index.XName(tableName)
 
-	return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v);", unique,
-		quote(idxName), quote(tableName),
-		quote(strings.Join(index.Cols, quote(","))))
+	quotedCols := []string{}
+	for _, col := range index.Cols {
+		quotedCols = append(quotedCols, db.dialect.Quote(col))
+	}
+
+	return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v);", unique, quote(idxName), quote(tableName), strings.Join(quotedCols, ","))
 }
 
 func (db *BaseDialect) QuoteColList(cols []string) string {
@@ -168,3 +189,74 @@ func (db *BaseDialect) DropIndexSql(tableName string, index *Index) string {
 func (db *BaseDialect) UpdateTableSql(tableName string, columns []*Column) string {
 	return "-- NOT REQUIRED"
 }
+
+func (db *BaseDialect) ColString(col *Column) string {
+	sql := db.dialect.Quote(col.Name) + " "
+
+	sql += db.dialect.SqlType(col) + " "
+
+	if col.IsPrimaryKey {
+		sql += "PRIMARY KEY "
+		if col.IsAutoIncrement {
+			sql += db.dialect.AutoIncrStr() + " "
+		}
+	}
+
+	if db.dialect.ShowCreateNull() {
+		if col.Nullable {
+			sql += "NULL "
+		} else {
+			sql += "NOT NULL "
+		}
+	}
+
+	if col.Default != "" {
+		sql += "DEFAULT " + db.dialect.Default(col) + " "
+	}
+
+	return sql
+}
+
+func (db *BaseDialect) ColStringNoPk(col *Column) string {
+	sql := db.dialect.Quote(col.Name) + " "
+
+	sql += db.dialect.SqlType(col) + " "
+
+	if db.dialect.ShowCreateNull() {
+		if col.Nullable {
+			sql += "NULL "
+		} else {
+			sql += "NOT NULL "
+		}
+	}
+
+	if col.Default != "" {
+		sql += "DEFAULT " + db.dialect.Default(col) + " "
+	}
+
+	return sql
+}
+
+func (db *BaseDialect) Limit(limit int64) string {
+	return fmt.Sprintf(" LIMIT %d", limit)
+}
+
+func (db *BaseDialect) LimitOffset(limit int64, offset int64) string {
+	return fmt.Sprintf(" LIMIT %d OFFSET %d", limit, offset)
+}
+
+func (db *BaseDialect) PreInsertId(table string, sess *xorm.Session) error {
+	return nil
+}
+
+func (db *BaseDialect) PostInsertId(table string, sess *xorm.Session) error {
+	return nil
+}
+
+func (db *BaseDialect) CleanDB() error {
+	return nil
+}
+
+func (db *BaseDialect) NoOpSql() string {
+	return "SELECT 0;"
+}
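
The new `Limit`/`LimitOffset` methods let query builders stop hard-coding `LIMIT ?` fragments, presumably so a dialect such as the newly added MSSQL constant can later supply its own syntax. A minimal sketch of a caller, assuming a `migrator.Dialect` obtained from `NewDialect(engine)`; the table and function names are hypothetical:

```go
package example

import (
	"bytes"

	"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)

// buildListQuery delegates the LIMIT/OFFSET syntax to the dialect, so the
// same builder code works on every supported database.
func buildListQuery(d migrator.Dialect, page, pageSize int64) string {
	var sql bytes.Buffer
	sql.WriteString("SELECT id, name FROM example_table ORDER BY name ASC")

	if page > 1 {
		sql.WriteString(d.LimitOffset(pageSize, pageSize*(page-1)))
	} else {
		sql.WriteString(d.Limit(pageSize))
	}

	return sql.String()
}
```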

+ 38 - 17
pkg/services/sqlstore/migrator/migrations.go

@@ -24,37 +24,58 @@ func (m *MigrationBase) GetCondition() MigrationCondition {
 type RawSqlMigration struct {
 	MigrationBase
 
-	sqlite   string
-	mysql    string
-	postgres string
+	sql map[string]string
+}
+
+func NewRawSqlMigration(sql string) *RawSqlMigration {
+	m := &RawSqlMigration{}
+	if sql != "" {
+		m.Default(sql)
+	}
+	return m
 }
 
 func (m *RawSqlMigration) Sql(dialect Dialect) string {
-	switch dialect.DriverName() {
-	case MYSQL:
-		return m.mysql
-	case SQLITE:
-		return m.sqlite
-	case POSTGRES:
-		return m.postgres
+	if m.sql != nil {
+		if val := m.sql[dialect.DriverName()]; val != "" {
+			return val
+		}
+
+		if val := m.sql["default"]; val != "" {
+			return val
+		}
 	}
 
-	panic("db type not supported")
+	return dialect.NoOpSql()
 }
 
-func (m *RawSqlMigration) Sqlite(sql string) *RawSqlMigration {
-	m.sqlite = sql
+func (m *RawSqlMigration) Set(dialect string, sql string) *RawSqlMigration {
+	if m.sql == nil {
+		m.sql = make(map[string]string)
+	}
+
+	m.sql[dialect] = sql
 	return m
 }
 
+func (m *RawSqlMigration) Default(sql string) *RawSqlMigration {
+	return m.Set("default", sql)
+}
+
+func (m *RawSqlMigration) Sqlite(sql string) *RawSqlMigration {
+	return m.Set(SQLITE, sql)
+}
+
 func (m *RawSqlMigration) Mysql(sql string) *RawSqlMigration {
-	m.mysql = sql
-	return m
+	return m.Set(MYSQL, sql)
 }
 
 func (m *RawSqlMigration) Postgres(sql string) *RawSqlMigration {
-	m.postgres = sql
-	return m
+	return m.Set(POSTGRES, sql)
+}
+
+func (m *RawSqlMigration) Mssql(sql string) *RawSqlMigration {
+	return m.Set(MSSQL, sql)
 }
 
 type AddColumnMigration struct {
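
A minimal sketch of how the reworked `RawSqlMigration` is used after this change, mirroring the migration files above: one default statement covers every dialect, per-dialect overrides are added only where needed, and an empty default falls back to `dialect.NoOpSql()`. The migration names and SQL below are hypothetical:

```go
package migrations

import (
	. "github.com/grafana/grafana/pkg/services/sqlstore/migrator"
)

func addExampleMigrations(mg *Migrator) {
	// One statement runs unchanged on every database.
	mg.AddMigration("set example flag to 1 where 0",
		NewRawSqlMigration("UPDATE example SET flag = 1 WHERE flag = 0"))

	// Only MySQL needs real SQL here; other dialects fall back to NoOpSql().
	mg.AddMigration("alter example.data to mediumtext",
		NewRawSqlMigration("").
			Mysql("ALTER TABLE example MODIFY data MEDIUMTEXT;"))
}
```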

+ 1 - 1
pkg/services/sqlstore/migrator/migrator.go

@@ -31,7 +31,7 @@ func NewMigrator(engine *xorm.Engine) *Migrator {
 	mg.x = engine
 	mg.Logger = log.New("migrator")
 	mg.migrations = make([]Migration, 0)
-	mg.dialect = NewDialect(mg.x.DriverName())
+	mg.dialect = NewDialect(mg.x)
 	return mg
 }
 

+ 25 - 5
pkg/services/sqlstore/migrator/mysql_dialect.go

@@ -1,17 +1,21 @@
 package migrator
 
 import (
+	"fmt"
 	"strconv"
 	"strings"
+
+	"github.com/go-xorm/xorm"
 )
 
 type Mysql struct {
 	BaseDialect
 }
 
-func NewMysqlDialect() *Mysql {
+func NewMysqlDialect(engine *xorm.Engine) *Mysql {
 	d := Mysql{}
 	d.BaseDialect.dialect = &d
+	d.BaseDialect.engine = engine
 	d.BaseDialect.driverName = MYSQL
 	return &d
 }
@@ -24,10 +28,6 @@ func (db *Mysql) Quote(name string) string {
 	return "`" + name + "`"
 }
 
-func (db *Mysql) QuoteStr() string {
-	return "`"
-}
-
 func (db *Mysql) AutoIncrStr() string {
 	return "AUTO_INCREMENT"
 }
@@ -105,3 +105,23 @@ func (db *Mysql) UpdateTableSql(tableName string, columns []*Column) string {
 
 	return "ALTER TABLE " + db.Quote(tableName) + " " + strings.Join(statements, ", ") + ";"
 }
+
+func (db *Mysql) CleanDB() error {
+	tables, _ := db.engine.DBMetas()
+	sess := db.engine.NewSession()
+	defer sess.Close()
+
+	for _, table := range tables {
+		if _, err := sess.Exec("set foreign_key_checks = 0"); err != nil {
+			return fmt.Errorf("failed to disable foreign key checks")
+		}
+		if _, err := sess.Exec("drop table " + table.Name + " ;"); err != nil {
+			return fmt.Errorf("failed to delete table: %v, err: %v", table.Name, err)
+		}
+		if _, err := sess.Exec("set foreign_key_checks = 1"); err != nil {
+			return fmt.Errorf("failed to disable foreign key checks")
+		}
+	}
+
+	return nil
+}

+ 20 - 6
pkg/services/sqlstore/migrator/postgres_dialect.go

@@ -4,15 +4,18 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
+
+	"github.com/go-xorm/xorm"
 )
 
 type Postgres struct {
 	BaseDialect
 }
 
-func NewPostgresDialect() *Postgres {
+func NewPostgresDialect(engine *xorm.Engine) *Postgres {
 	d := Postgres{}
 	d.BaseDialect.dialect = &d
+	d.BaseDialect.engine = engine
 	d.BaseDialect.driverName = POSTGRES
 	return &d
 }
@@ -25,10 +28,6 @@ func (db *Postgres) Quote(name string) string {
 	return "\"" + name + "\""
 }
 
-func (db *Postgres) QuoteStr() string {
-	return "\""
-}
-
 func (b *Postgres) LikeStr() string {
 	return "ILIKE"
 }
@@ -117,8 +116,23 @@ func (db *Postgres) UpdateTableSql(tableName string, columns []*Column) string {
 	var statements = []string{}
 
 	for _, col := range columns {
-		statements = append(statements, "ALTER "+db.QuoteStr()+col.Name+db.QuoteStr()+" TYPE "+db.SqlType(col))
+		statements = append(statements, "ALTER "+db.Quote(col.Name)+" TYPE "+db.SqlType(col))
 	}
 
 	return "ALTER TABLE " + db.Quote(tableName) + " " + strings.Join(statements, ", ") + ";"
 }
+
+func (db *Postgres) CleanDB() error {
+	sess := db.engine.NewSession()
+	defer sess.Close()
+
+	if _, err := sess.Exec("DROP SCHEMA public CASCADE;"); err != nil {
+		return fmt.Errorf("Failed to drop schema public")
+	}
+
+	if _, err := sess.Exec("CREATE SCHEMA public;"); err != nil {
+		return fmt.Errorf("Failed to create schema public")
+	}
+
+	return nil
+}

+ 11 - 6
pkg/services/sqlstore/migrator/sqlite_dialect.go

@@ -1,14 +1,19 @@
 package migrator
 
-import "fmt"
+import (
+	"fmt"
+
+	"github.com/go-xorm/xorm"
+)
 
 type Sqlite3 struct {
 	BaseDialect
 }
 
-func NewSqlite3Dialect() *Sqlite3 {
+func NewSqlite3Dialect(engine *xorm.Engine) *Sqlite3 {
 	d := Sqlite3{}
 	d.BaseDialect.dialect = &d
+	d.BaseDialect.engine = engine
 	d.BaseDialect.driverName = SQLITE
 	return &d
 }
@@ -21,10 +26,6 @@ func (db *Sqlite3) Quote(name string) string {
 	return "`" + name + "`"
 }
 
-func (db *Sqlite3) QuoteStr() string {
-	return "`"
-}
-
 func (db *Sqlite3) AutoIncrStr() string {
 	return "AUTOINCREMENT"
 }
@@ -77,3 +78,7 @@ func (db *Sqlite3) DropIndexSql(tableName string, index *Index) string {
 	idxName := index.XName(tableName)
 	return fmt.Sprintf("DROP INDEX %v", quote(idxName))
 }
+
+func (db *Sqlite3) CleanDB() error {
+	return nil
+}

+ 1 - 0
pkg/services/sqlstore/migrator/types.go

@@ -9,6 +9,7 @@ const (
 	POSTGRES = "postgres"
 	SQLITE   = "sqlite3"
 	MYSQL    = "mysql"
+	MSSQL    = "mssql"
 )
 
 type Migration interface {

+ 1 - 1
pkg/services/sqlstore/playlist.go

@@ -64,7 +64,7 @@ func UpdatePlaylist(cmd *m.UpdatePlaylistCommand) error {
 		Interval: playlist.Interval,
 	}
 
-	_, err := x.ID(cmd.Id).Cols("id", "name", "interval").Update(&playlist)
+	_, err := x.ID(cmd.Id).Cols("name", "interval").Update(&playlist)
 
 	if err != nil {
 		return err

+ 3 - 5
pkg/services/sqlstore/search_builder.go

@@ -92,7 +92,7 @@ func (sb *SearchBuilder) ToSql() (string, []interface{}) {
 		LEFT OUTER JOIN dashboard folder on folder.id = dashboard.folder_id
 		LEFT OUTER JOIN dashboard_tag on dashboard.id = dashboard_tag.dashboard_id`)
 
-	sb.sql.WriteString(" ORDER BY dashboard.title ASC LIMIT 5000")
+	sb.sql.WriteString(" ORDER BY dashboard.title ASC" + dialect.Limit(5000))
 
 	return sb.sql.String(), sb.params
 }
@@ -135,12 +135,11 @@ func (sb *SearchBuilder) buildTagQuery() {
 	// this ends the inner select (tag filtered part)
 	sb.sql.WriteString(`
 		GROUP BY dashboard.id HAVING COUNT(dashboard.id) >= ?
-		LIMIT ?) as ids
+		ORDER BY dashboard.id` + dialect.Limit(int64(sb.limit)) + `) as ids
 		INNER JOIN dashboard on ids.id = dashboard.id
 	`)
 
 	sb.params = append(sb.params, len(sb.tags))
-	sb.params = append(sb.params, sb.limit)
 }
 
 func (sb *SearchBuilder) buildMainQuery() {
@@ -153,8 +152,7 @@ func (sb *SearchBuilder) buildMainQuery() {
 	sb.sql.WriteString(` WHERE `)
 	sb.buildSearchWhereClause()
 
-	sb.sql.WriteString(` LIMIT ?) as ids INNER JOIN dashboard on ids.id = dashboard.id `)
-	sb.params = append(sb.params, sb.limit)
+	sb.sql.WriteString(` ORDER BY dashboard.title` + dialect.Limit(int64(sb.limit)) + `) as ids INNER JOIN dashboard on ids.id = dashboard.id `)
 }
 
 func (sb *SearchBuilder) buildSearchWhereClause() {

+ 2 - 5
pkg/services/sqlstore/search_builder_test.go

@@ -4,13 +4,10 @@ import (
 	"testing"
 
 	m "github.com/grafana/grafana/pkg/models"
-	"github.com/grafana/grafana/pkg/services/sqlstore/migrator"
 	. "github.com/smartystreets/goconvey/convey"
 )
 
 func TestSearchBuilder(t *testing.T) {
-	dialect = migrator.NewDialect("sqlite3")
-
 	Convey("Testing building a search", t, func() {
 		signedInUser := &m.SignedInUser{
 			OrgId:  1,
@@ -23,7 +20,7 @@ func TestSearchBuilder(t *testing.T) {
 			sql, params := sb.IsStarred().WithTitle("test").ToSql()
 			So(sql, ShouldStartWith, "SELECT")
 			So(sql, ShouldContainSubstring, "INNER JOIN dashboard on ids.id = dashboard.id")
-			So(sql, ShouldEndWith, "ORDER BY dashboard.title ASC LIMIT 5000")
+			So(sql, ShouldContainSubstring, "ORDER BY dashboard.title ASC")
 			So(len(params), ShouldBeGreaterThan, 0)
 		})
 
@@ -31,7 +28,7 @@ func TestSearchBuilder(t *testing.T) {
 			sql, params := sb.WithTags([]string{"tag1", "tag2"}).ToSql()
 			So(sql, ShouldStartWith, "SELECT")
 			So(sql, ShouldContainSubstring, "LEFT OUTER JOIN dashboard_tag")
-			So(sql, ShouldEndWith, "ORDER BY dashboard.title ASC LIMIT 5000")
+			So(sql, ShouldContainSubstring, "ORDER BY dashboard.title ASC")
 			So(len(params), ShouldBeGreaterThan, 0)
 		})
 	})

+ 21 - 0
pkg/services/sqlstore/shared.go

@@ -1,6 +1,7 @@
 package sqlstore
 
 import (
+	"reflect"
 	"time"
 
 	"github.com/go-xorm/xorm"
@@ -67,3 +68,23 @@ func inTransactionWithRetry(callback dbTransactionFunc, retry int) error {
 
 	return nil
 }
+
+func (sess *DBSession) InsertId(bean interface{}) (int64, error) {
+	table := sess.DB().Mapper.Obj2Table(getTypeName(bean))
+
+	dialect.PreInsertId(table, sess.Session)
+
+	id, err := sess.Session.InsertOne(bean)
+
+	dialect.PostInsertId(table, sess.Session)
+
+	return id, err
+}
+
+func getTypeName(bean interface{}) (res string) {
+	t := reflect.TypeOf(bean)
+	for t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t.Name()
+}
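
A minimal sketch of what the new `InsertId` helper is for, assuming a model whose primary key is assigned by the caller (as in the `user.go` change further down): the active dialect gets a chance to run setup/teardown around the insert via `PreInsertId`/`PostInsertId`, which the base dialect implements as no-ops.

```go
package example

import (
	m "github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/services/sqlstore"
)

// insertWithExplicitId writes a row that already carries its primary key,
// letting the dialect wrap the insert with its PreInsertId/PostInsertId hooks.
func insertWithExplicitId(sess *sqlstore.DBSession) error {
	org := &m.Org{Id: 2, Name: "example org"}
	_, err := sess.InsertId(org)
	return err
}
```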

+ 19 - 21
pkg/services/sqlstore/sqlstore.go

@@ -20,7 +20,6 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 
 	"github.com/go-sql-driver/mysql"
-	_ "github.com/go-sql-driver/mysql"
 	"github.com/go-xorm/xorm"
 	_ "github.com/lib/pq"
 	_ "github.com/mattn/go-sqlite3"
@@ -97,7 +96,7 @@ func NewEngine() *xorm.Engine {
 
 func SetEngine(engine *xorm.Engine) (err error) {
 	x = engine
-	dialect = migrator.NewDialect(x.DriverName())
+	dialect = migrator.NewDialect(x)
 
 	migrator := migrator.NewMigrator(x)
 	migrations.AddMigrations(migrator)
@@ -116,7 +115,7 @@ func getEngine() (*xorm.Engine, error) {
 
 	cnnstr := ""
 	switch DbCfg.Type {
-	case "mysql":
+	case migrator.MYSQL:
 		protocol := "tcp"
 		if strings.HasPrefix(DbCfg.Host, "/") {
 			protocol = "unix"
@@ -133,7 +132,7 @@ func getEngine() (*xorm.Engine, error) {
 			mysql.RegisterTLSConfig("custom", tlsCert)
 			cnnstr += "&tls=custom"
 		}
-	case "postgres":
+	case migrator.POSTGRES:
 		var host, port = "127.0.0.1", "5432"
 		fields := strings.Split(DbCfg.Host, ":")
 		if len(fields) > 0 && len(strings.TrimSpace(fields[0])) > 0 {
@@ -153,7 +152,7 @@ func getEngine() (*xorm.Engine, error) {
 			strings.Replace(DbCfg.ClientKeyPath, `'`, `\'`, -1),
 			strings.Replace(DbCfg.CaCertPath, `'`, `\'`, -1),
 		)
-	case "sqlite3":
+	case migrator.SQLITE:
 		if !filepath.IsAbs(DbCfg.Path) {
 			DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path)
 		}
@@ -230,16 +229,10 @@ func LoadConfig() {
 	DbCfg.Path = sec.Key("path").MustString("data/grafana.db")
 }
 
-var (
-	dbSqlite   = "sqlite"
-	dbMySql    = "mysql"
-	dbPostgres = "postgres"
-)
-
 func InitTestDB(t *testing.T) *xorm.Engine {
-	selectedDb := dbSqlite
-	// selectedDb := dbMySql
-	// selectedDb := dbPostgres
+	selectedDb := migrator.SQLITE
+	// selectedDb := migrator.MYSQL
+	// selectedDb := migrator.POSTGRES
 
 	var x *xorm.Engine
 	var err error
@@ -250,9 +243,9 @@ func InitTestDB(t *testing.T) *xorm.Engine {
 	}
 
 	switch strings.ToLower(selectedDb) {
-	case dbMySql:
+	case migrator.MYSQL:
 		x, err = xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr)
-	case dbPostgres:
+	case migrator.POSTGRES:
 		x, err = xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr)
 	default:
 		x, err = xorm.NewEngine(sqlutil.TestDB_Sqlite3.DriverName, sqlutil.TestDB_Sqlite3.ConnStr)
@@ -261,24 +254,29 @@ func InitTestDB(t *testing.T) *xorm.Engine {
 	x.DatabaseTZ = time.UTC
 	x.TZLocation = time.UTC
 
-	// x.ShowSQL()
-
 	if err != nil {
 		t.Fatalf("Failed to init test database: %v", err)
 	}
 
-	sqlutil.CleanDB(x)
+	dialect = migrator.NewDialect(x)
+
+	err = dialect.CleanDB()
+	if err != nil {
+		t.Fatalf("Failed to clean test db %v", err)
+	}
 
 	if err := SetEngine(x); err != nil {
 		t.Fatal(err)
 	}
 
+	// x.ShowSQL()
+
 	return x
 }
 
 func IsTestDbMySql() bool {
 	if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
-		return db == dbMySql
+		return db == migrator.MYSQL
 	}
 
 	return false
@@ -286,7 +284,7 @@ func IsTestDbMySql() bool {
 
 func IsTestDbPostgres() bool {
 	if db, present := os.LookupEnv("GRAFANA_TEST_DB"); present {
-		return db == dbPostgres
+		return db == migrator.POSTGRES
 	}
 
 	return false

+ 0 - 37
pkg/services/sqlstore/sqlutil/sqlutil.go

@@ -1,11 +1,5 @@
 package sqlutil
 
-import (
-	"fmt"
-
-	"github.com/go-xorm/xorm"
-)
-
 type TestDB struct {
 	DriverName string
 	ConnStr    string
@@ -15,34 +9,3 @@ var TestDB_Sqlite3 = TestDB{DriverName: "sqlite3", ConnStr: ":memory:"}
 var TestDB_Mysql = TestDB{DriverName: "mysql", ConnStr: "grafana:password@tcp(localhost:3306)/grafana_tests?collation=utf8mb4_unicode_ci"}
 var TestDB_Postgres = TestDB{DriverName: "postgres", ConnStr: "user=grafanatest password=grafanatest host=localhost port=5432 dbname=grafanatest sslmode=disable"}
 var TestDB_Mssql = TestDB{DriverName: "mssql", ConnStr: "server=localhost;port=1433;database=grafanatest;user id=grafana;password=Password!"}
-
-func CleanDB(x *xorm.Engine) {
-	if x.DriverName() == "postgres" {
-		sess := x.NewSession()
-		defer sess.Close()
-
-		if _, err := sess.Exec("DROP SCHEMA public CASCADE;"); err != nil {
-			panic("Failed to drop schema public")
-		}
-
-		if _, err := sess.Exec("CREATE SCHEMA public;"); err != nil {
-			panic("Failed to create schema public")
-		}
-	} else if x.DriverName() == "mysql" {
-		tables, _ := x.DBMetas()
-		sess := x.NewSession()
-		defer sess.Close()
-
-		for _, table := range tables {
-			if _, err := sess.Exec("set foreign_key_checks = 0"); err != nil {
-				panic("failed to disable foreign key checks")
-			}
-			if _, err := sess.Exec("drop table " + table.Name + " ;"); err != nil {
-				panic(fmt.Sprintf("failed to delete table: %v, err: %v", table.Name, err))
-			}
-			if _, err := sess.Exec("set foreign_key_checks = 1"); err != nil {
-				panic("failed to disable foreign key checks")
-			}
-		}
-	}
-}

+ 1 - 2
pkg/services/sqlstore/team.go

@@ -161,9 +161,8 @@ func SearchTeams(query *m.SearchTeamsQuery) error {
 	sql.WriteString(` order by team.name asc`)
 
 	if query.Limit != 0 {
-		sql.WriteString(` limit ? offset ?`)
 		offset := query.Limit * (query.Page - 1)
-		params = append(params, query.Limit, offset)
+		sql.WriteString(dialect.LimitOffset(int64(query.Limit), int64(offset)))
 	}
 
 	if err := x.Sql(sql.String(), params...).Find(&query.Result.Teams); err != nil {

+ 8 - 2
pkg/services/sqlstore/user.go

@@ -60,8 +60,14 @@ func getOrgIdForNewUser(cmd *m.CreateUserCommand, sess *DBSession) (int64, error
 	org.Created = time.Now()
 	org.Updated = time.Now()
 
-	if _, err := sess.Insert(&org); err != nil {
-		return 0, err
+	if org.Id != 0 {
+		if _, err := sess.InsertId(&org); err != nil {
+			return 0, err
+		}
+	} else {
+		if _, err := sess.InsertOne(&org); err != nil {
+			return 0, err
+		}
 	}
 
 	sess.publishAfterCommit(&events.OrgCreated{

+ 55 - 43
pkg/tracing/tracing.go

@@ -1,68 +1,71 @@
 package tracing
 
 import (
+	"context"
 	"io"
 	"strings"
 
 	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/registry"
 	"github.com/grafana/grafana/pkg/setting"
 
 	opentracing "github.com/opentracing/opentracing-go"
 	jaegercfg "github.com/uber/jaeger-client-go/config"
-	ini "gopkg.in/ini.v1"
 )
 
-var (
-	logger log.Logger = log.New("tracing")
-)
-
-type TracingSettings struct {
-	Enabled      bool
-	Address      string
-	CustomTags   map[string]string
-	SamplerType  string
-	SamplerParam float64
+func init() {
+	registry.RegisterService(&TracingService{})
 }
 
-func Init(file *ini.File) (io.Closer, error) {
-	settings := parseSettings(file)
-	return internalInit(settings)
+type TracingService struct {
+	enabled      bool
+	address      string
+	customTags   map[string]string
+	samplerType  string
+	samplerParam float64
+	log          log.Logger
+	closer       io.Closer
+
+	Cfg *setting.Cfg `inject:""`
 }
 
-func parseSettings(file *ini.File) *TracingSettings {
-	settings := &TracingSettings{}
+func (ts *TracingService) Init() error {
+	ts.log = log.New("tracing")
+	ts.parseSettings()
 
-	var section, err = setting.Raw.GetSection("tracing.jaeger")
-	if err != nil {
-		return settings
+	if ts.enabled {
+		ts.initGlobalTracer()
 	}
 
-	settings.Address = section.Key("address").MustString("")
-	if settings.Address != "" {
-		settings.Enabled = true
+	return nil
+}
+
+func (ts *TracingService) parseSettings() {
+	var section, err = ts.Cfg.Raw.GetSection("tracing.jaeger")
+	if err != nil {
+		return
 	}
 
-	settings.CustomTags = splitTagSettings(section.Key("always_included_tag").MustString(""))
-	settings.SamplerType = section.Key("sampler_type").MustString("")
-	settings.SamplerParam = section.Key("sampler_param").MustFloat64(1)
+	ts.address = section.Key("address").MustString("")
+	if ts.address != "" {
+		ts.enabled = true
+	}
 
-	return settings
+	ts.customTags = splitTagSettings(section.Key("always_included_tag").MustString(""))
+	ts.samplerType = section.Key("sampler_type").MustString("")
+	ts.samplerParam = section.Key("sampler_param").MustFloat64(1)
 }
 
-func internalInit(settings *TracingSettings) (io.Closer, error) {
-	if !settings.Enabled {
-		return &nullCloser{}, nil
-	}
-
+func (ts *TracingService) initGlobalTracer() error {
 	cfg := jaegercfg.Configuration{
-		Disabled: !settings.Enabled,
+		Disabled: !ts.enabled,
 		Sampler: &jaegercfg.SamplerConfig{
-			Type:  settings.SamplerType,
-			Param: settings.SamplerParam,
+			Type:  ts.samplerType,
+			Param: ts.samplerParam,
 		},
 		Reporter: &jaegercfg.ReporterConfig{
 			LogSpans:           false,
-			LocalAgentHostPort: settings.Address,
+			LocalAgentHostPort: ts.address,
 		},
 	}
 
@@ -71,18 +74,31 @@ func internalInit(settings *TracingSettings) (io.Closer, error) {
 	options := []jaegercfg.Option{}
 	options = append(options, jaegercfg.Logger(jLogger))
 
-	for tag, value := range settings.CustomTags {
+	for tag, value := range ts.customTags {
 		options = append(options, jaegercfg.Tag(tag, value))
 	}
 
 	tracer, closer, err := cfg.New("grafana", options...)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	opentracing.InitGlobalTracer(tracer)
-	logger.Info("Initializing Jaeger tracer", "address", settings.Address)
-	return closer, nil
+
+	ts.closer = closer
+
+	return nil
+}
+
+func (ts *TracingService) Run(ctx context.Context) error {
+	<-ctx.Done()
+
+	if ts.closer != nil {
+		ts.log.Info("Closing tracing")
+		ts.closer.Close()
+	}
+
+	return nil
 }
 
 func splitTagSettings(input string) map[string]string {
@@ -110,7 +126,3 @@ func (jlw *jaegerLogWrapper) Error(msg string) {
 func (jlw *jaegerLogWrapper) Infof(msg string, args ...interface{}) {
 	jlw.logger.Info(msg, args)
 }
-
-type nullCloser struct{}
-
-func (*nullCloser) Close() error { return nil }
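
The rewrite turns tracing into a self-registering background service, which is why `server.go` above now only needs a blank import instead of calling `tracing.Init` and holding the closer itself. A minimal sketch of that pattern with a hypothetical service:

```go
package myservice

import (
	"context"

	"github.com/grafana/grafana/pkg/registry"
	"github.com/grafana/grafana/pkg/setting"
)

func init() {
	// Registering in init() is what makes a blank import sufficient.
	registry.RegisterService(&MyService{})
}

type MyService struct {
	Cfg *setting.Cfg `inject:""` // filled in by the server's inject graph
}

// Init runs once dependencies have been injected; read settings here.
func (s *MyService) Init() error {
	return nil
}

// Run blocks until shutdown, then releases whatever Init acquired.
func (s *MyService) Run(ctx context.Context) error {
	<-ctx.Done()
	return nil
}
```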

+ 5 - 5
yarn.lock

@@ -1655,9 +1655,9 @@ caniuse-api@^1.5.2:
     lodash.memoize "^4.1.2"
     lodash.uniq "^4.5.0"
 
-caniuse-db@^1.0.30000529, caniuse-db@^1.0.30000634, caniuse-db@^1.0.30000639:
-  version "1.0.30000830"
-  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000830.tgz#6e45255b345649fd15ff59072da1e12bb3de2f13"
+caniuse-db@1.0.30000772, caniuse-db@^1.0.30000529, caniuse-db@^1.0.30000634, caniuse-db@^1.0.30000639:
+  version "1.0.30000772"
+  resolved "https://registry.yarnpkg.com/caniuse-db/-/caniuse-db-1.0.30000772.tgz#51aae891768286eade4a3d8319ea76d6a01b512b"
 
 capture-stack-trace@^1.0.0:
   version "1.0.0"
@@ -11048,8 +11048,8 @@ unzip-response@^2.0.1:
   resolved "https://registry.yarnpkg.com/unzip-response/-/unzip-response-2.0.1.tgz#d2f0f737d16b0615e72a6935ed04214572d56f97"
 
 upath@^1.0.0:
-  version "1.0.4"
-  resolved "https://registry.yarnpkg.com/upath/-/upath-1.0.4.tgz#ee2321ba0a786c50973db043a50b7bcba822361d"
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/upath/-/upath-1.0.5.tgz#02cab9ecebe95bbec6d5fc2566325725ab6d1a73"
 
 update-notifier@^2.3.0:
   version "2.5.0"