浏览代码

Merge remote-tracking branch 'origin/master' into concurrent-sql

Marcus Efraimsson 7 年之前
父节点
当前提交
3932e4db5a
共有 100 个文件被更改,包括 3636 次插入600 次删除
  1. 2 1
      .circleci/config.yml
  2. 1 0
      .gitignore
  3. 14 7
      CHANGELOG.md
  4. 7 1
      Gopkg.lock
  5. 4 0
      Gopkg.toml
  6. 11 6
      build.go
  7. 4 0
      conf/defaults.ini
  8. 5 1
      conf/sample.ini
  9. 9 0
      devenv/bulk_alerting_dashboards/bulk_alerting_dashboards.yaml
  10. 168 0
      devenv/bulk_alerting_dashboards/bulkdash_alerting.jsonnet
  11. 1 1
      devenv/docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf
  12. 1 0
      devenv/docker/ha_test/.gitignore
  13. 137 0
      devenv/docker/ha_test/README.md
  14. 156 0
      devenv/docker/ha_test/alerts.sh
  15. 78 0
      devenv/docker/ha_test/docker-compose.yaml
  16. 202 0
      devenv/docker/ha_test/grafana/provisioning/alerts.jsonnet
  17. 8 0
      devenv/docker/ha_test/grafana/provisioning/dashboards/alerts.yaml
  18. 172 0
      devenv/docker/ha_test/grafana/provisioning/dashboards/alerts/overview.json
  19. 11 0
      devenv/docker/ha_test/grafana/provisioning/datasources/datasources.yaml
  20. 39 0
      devenv/docker/ha_test/prometheus/prometheus.yml
  21. 21 4
      devenv/setup.sh
  22. 1 1
      docs/README.md
  23. 1 1
      docs/sources/administration/provisioning.md
  24. 1 0
      docs/sources/auth/ldap.md
  25. 171 0
      docs/sources/features/datasources/stackdriver.md
  26. 1 1
      docs/sources/guides/whats-new-in-v4-2.md
  27. 8 0
      docs/sources/installation/configuration.md
  28. 1 1
      docs/sources/tutorials/ha_setup.md
  29. 1 0
      docs/versions.json
  30. 1 1
      package.json
  31. 0 9
      pkg/api/avatar/avatar.go
  32. 7 3
      pkg/api/dashboard.go
  33. 1 1
      pkg/api/folder.go
  34. 0 10
      pkg/api/folder_test.go
  35. 11 5
      pkg/api/index.go
  36. 0 3
      pkg/api/live/hub.go
  37. 171 0
      pkg/api/pluginproxy/access_token_provider.go
  38. 94 0
      pkg/api/pluginproxy/access_token_provider_test.go
  39. 93 0
      pkg/api/pluginproxy/ds_auth_provider.go
  40. 21 0
      pkg/api/pluginproxy/ds_auth_provider_test.go
  41. 3 130
      pkg/api/pluginproxy/ds_proxy.go
  42. 5 17
      pkg/api/pluginproxy/ds_proxy_test.go
  43. 10 9
      pkg/api/render.go
  44. 2 0
      pkg/cmd/grafana-cli/commands/commands.go
  45. 1 1
      pkg/cmd/grafana-cli/commands/install_command.go
  46. 2 1
      pkg/cmd/grafana-server/main.go
  47. 0 2
      pkg/components/imguploader/azureblobuploader.go
  48. 28 1
      pkg/components/imguploader/s3uploader.go
  49. 8 4
      pkg/components/null/float.go
  50. 0 5
      pkg/middleware/middleware_test.go
  51. 2 2
      pkg/models/alert.go
  52. 33 22
      pkg/models/alert_notifications.go
  53. 3 1
      pkg/models/datasource.go
  54. 10 6
      pkg/plugins/app_plugin.go
  55. 12 5
      pkg/services/alerting/interfaces.go
  56. 77 44
      pkg/services/alerting/notifier.go
  57. 1 1
      pkg/services/alerting/notifiers/alertmanager.go
  58. 28 33
      pkg/services/alerting/notifiers/base.go
  59. 101 60
      pkg/services/alerting/notifiers/base_test.go
  60. 1 1
      pkg/services/alerting/notifiers/kafka.go
  61. 1 1
      pkg/services/alerting/notifiers/opsgenie.go
  62. 1 1
      pkg/services/alerting/notifiers/pagerduty.go
  63. 6 13
      pkg/services/alerting/result_handler.go
  64. 3 0
      pkg/services/alerting/rule.go
  65. 1 1
      pkg/services/alerting/test_notification.go
  66. 0 4
      pkg/services/alerting/ticker.go
  67. 0 6
      pkg/services/notifications/notifications_test.go
  68. 1 1
      pkg/services/provisioning/dashboards/config_reader.go
  69. 2 2
      pkg/services/provisioning/dashboards/config_reader_test.go
  70. 26 22
      pkg/services/provisioning/dashboards/file_reader.go
  71. 4 3
      pkg/services/provisioning/dashboards/file_reader_linux_test.go
  72. 2 1
      pkg/services/provisioning/dashboards/file_reader_test.go
  73. 1 1
      pkg/services/provisioning/dashboards/testdata/test-configs/dashboards-from-disk/dev-dashboards.yaml
  74. 1 1
      pkg/services/provisioning/dashboards/testdata/test-configs/version-0/version-0.yaml
  75. 10 9
      pkg/services/rendering/interface.go
  76. 19 6
      pkg/services/rendering/rendering.go
  77. 6 0
      pkg/services/sqlstore/alert.go
  78. 113 28
      pkg/services/sqlstore/alert_notification.go
  79. 129 46
      pkg/services/sqlstore/alert_notification_test.go
  80. 0 23
      pkg/services/sqlstore/dashboard_service_integration_test.go
  81. 23 0
      pkg/services/sqlstore/migrations/alert_mig.go
  82. 2 0
      pkg/services/sqlstore/migrator/dialect.go
  83. 12 0
      pkg/services/sqlstore/migrator/mysql_dialect.go
  84. 11 0
      pkg/services/sqlstore/migrator/postgres_dialect.go
  85. 11 0
      pkg/services/sqlstore/migrator/sqlite_dialect.go
  86. 0 4
      pkg/services/sqlstore/transactions_test.go
  87. 9 4
      pkg/setting/setting.go
  88. 9 5
      pkg/social/social.go
  89. 1 2
      pkg/tsdb/cloudwatch/credentials.go
  90. 1 1
      pkg/tsdb/cloudwatch/metric_find_query.go
  91. 21 8
      pkg/tsdb/elasticsearch/response_parser.go
  92. 5 5
      pkg/tsdb/elasticsearch/time_series_query.go
  93. 120 0
      pkg/tsdb/stackdriver/annotation_query.go
  94. 33 0
      pkg/tsdb/stackdriver/annotation_query_test.go
  95. 460 0
      pkg/tsdb/stackdriver/stackdriver.go
  96. 357 0
      pkg/tsdb/stackdriver/stackdriver_test.go
  97. 46 0
      pkg/tsdb/stackdriver/test-data/1-series-response-agg-one-metric.json
  98. 145 0
      pkg/tsdb/stackdriver/test-data/2-series-response-no-agg.json
  99. 43 0
      pkg/tsdb/stackdriver/types.go
  100. 39 0
      public/app/core/components/LayoutSelector/LayoutSelector.tsx

+ 2 - 1
.circleci/config.yml

@@ -83,13 +83,14 @@ jobs:
       - checkout
       - run: 'go get -u github.com/alecthomas/gometalinter'
       - run: 'go get -u github.com/tsenart/deadcode'
+      - run: 'go get -u github.com/jgautheron/goconst/cmd/goconst'
       - run: 'go get -u github.com/gordonklaus/ineffassign'
       - run: 'go get -u github.com/opennota/check/cmd/structcheck'
       - run: 'go get -u github.com/mdempsky/unconvert'
       - run: 'go get -u github.com/opennota/check/cmd/varcheck'
       - run:
           name: run linters
-          command: 'gometalinter --enable-gc --vendor --deadline 10m --disable-all --enable=deadcode --enable=ineffassign --enable=structcheck --enable=unconvert --enable=varcheck ./...'
+          command: 'gometalinter --enable-gc --vendor --deadline 10m --disable-all --enable=deadcode --enable=goconst --enable=ineffassign --enable=structcheck --enable=unconvert --enable=varcheck ./...'
       - run:
           name: run go vet
           command: 'go vet ./pkg/...'

+ 1 - 0
.gitignore

@@ -72,3 +72,4 @@ debug.test
 *.orig
 
 /devenv/bulk-dashboards/*.json
+/devenv/bulk_alerting_dashboards/*.json

+ 14 - 7
CHANGELOG.md

@@ -1,23 +1,30 @@
-# 5.4.0 (unreleased)
+# 5.3.0 (unreleased)
+
+# 5.3.0-beta3 ()
+
+* **Alerting**: Alert reminders deduping not working as expected when running multiple Grafana instances [#13492](https://github.com/grafana/grafana/issues/13492)
+
+# 5.3.0-beta2 (2018-10-01)
 
 ### New Features
 
 * **Annotations**: Enable template variables in tagged annotations queries [#9735](https://github.com/grafana/grafana/issues/9735)
+* **Stackdriver**: Support for Google Stackdriver Datasource [#13289](https://github.com/grafana/grafana/pull/13289)
 
 ### Minor
 
+* **Provisioning**: Dashboard Provisioning now support symlinks that changes target [#12534](https://github.com/grafana/grafana/issues/12534), thx [@auhlig](https://github.com/auhlig)
 * **OAuth**: Allow oauth email attribute name to be configurable [#12986](https://github.com/grafana/grafana/issues/12986), thx [@bobmshannon](https://github.com/bobmshannon)
 * **Tags**: Default sort order for GetDashboardTags [#11681](https://github.com/grafana/grafana/pull/11681), thx [@Jonnymcc](https://github.com/Jonnymcc)
 * **Prometheus**: Label completion queries respect dashboard time range  [#12251](https://github.com/grafana/grafana/pull/12251), thx [@mtanda](https://github.com/mtanda)
 * **Prometheus**: Allow to display annotations based on Prometheus series value [#10159](https://github.com/grafana/grafana/issues/10159), thx [@mtanda](https://github.com/mtanda)
 * **Prometheus**: Adhoc-filtering for Prometheus dashboards [#13212](https://github.com/grafana/grafana/issues/13212)
 * **Singlestat**: Fix gauge display accuracy for percents [#13270](https://github.com/grafana/grafana/issues/13270), thx [@tianon](https://github.com/tianon)
-
-# 5.3.0 (unreleased)
-
-### Minor
-
+* **Dashboard**: Prevent auto refresh from starting when loading dashboard with absolute time range [#12030](https://github.com/grafana/grafana/issues/12030)
+* **Templating**: New templating variable type `Text box` that allows free text input [#3173](https://github.com/grafana/grafana/issues/3173)
 * **Alerting**: Link to view full size image in Microsoft Teams alert notifier [#13121](https://github.com/grafana/grafana/issues/13121), thx [@holiiveira](https://github.com/holiiveira)
+* **Alerting**: Fixes a bug where all alerts would send reminders after upgrade & restart [#13402](https://github.com/grafana/grafana/pull/13402)
+* **Alerting**: Concurrent render limit for graphs used in notifications [#13401](https://github.com/grafana/grafana/pull/13401)
 * **Postgres/MySQL/MSSQL**: Add support for replacing $__interval and  $__interval_ms in alert queries [#11555](https://github.com/grafana/grafana/issues/11555), thx [@svenklemm](https://github.com/svenklemm)
 
 # 5.3.0-beta1 (2018-09-06)
@@ -505,7 +512,7 @@ See [security announcement](https://community.grafana.com/t/grafana-5-2-3-and-4-
 # 4.6.2 (2017-11-16)
 
 ## Important
-* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if your using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)
+* **Prometheus**: Fixes bug with new prometheus alerts in Grafana. Make sure to download this version if you're using Prometheus for alerting. More details in the issue. [#9777](https://github.com/grafana/grafana/issues/9777)
 
 ## Fixes
 * **Color picker**: Bug after using textbox input field to change/paste color string [#9769](https://github.com/grafana/grafana/issues/9769)

+ 7 - 1
Gopkg.lock

@@ -19,6 +19,12 @@
   packages = ["."]
   revision = "7677a1d7c1137cd3dd5ba7a076d0c898a1ef4520"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/VividCortex/mysqlerr"
+  packages = ["."]
+  revision = "6c6b55f8796f578c870b7e19bafb16103bc40095"
+
 [[projects]]
   name = "github.com/aws/aws-sdk-go"
   packages = [
@@ -673,6 +679,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "81a37e747b875cf870c1b9486fa3147e704dea7db8ba86f7cb942d3ddc01d3e3"
+  inputs-digest = "6e9458f912a5f0eb3430b968f1b4dbc4e3b7671b282cf4fe1573419a6d9ba0d4"
   solver-name = "gps-cdcl"
   solver-version = 1

+ 4 - 0
Gopkg.toml

@@ -203,3 +203,7 @@ ignored = [
 [[constraint]]
   name = "github.com/denisenkom/go-mssqldb"
   revision = "270bc3860bb94dd3a3ffd047377d746c5e276726"
+
+[[constraint]]
+  name = "github.com/VividCortex/mysqlerr"
+  branch = "master"

+ 11 - 6
build.go

@@ -22,6 +22,11 @@ import (
 	"time"
 )
 
+const (
+	windows = "windows"
+	linux   = "linux"
+)
+
 var (
 	//versionRe = regexp.MustCompile(`-[0-9]{1,3}-g[0-9a-f]{5,10}`)
 	goarch  string
@@ -110,13 +115,13 @@ func main() {
 		case "package":
 			grunt(gruntBuildArg("build")...)
 			grunt(gruntBuildArg("package")...)
-			if goos == "linux" {
+			if goos == linux {
 				createLinuxPackages()
 			}
 
 		case "package-only":
 			grunt(gruntBuildArg("package")...)
-			if goos == "linux" {
+			if goos == linux {
 				createLinuxPackages()
 			}
 
@@ -378,7 +383,7 @@ func ensureGoPath() {
 }
 
 func grunt(params ...string) {
-	if runtime.GOOS == "windows" {
+	if runtime.GOOS == windows {
 		runPrint(`.\node_modules\.bin\grunt`, params...)
 	} else {
 		runPrint("./node_modules/.bin/grunt", params...)
@@ -420,7 +425,7 @@ func build(binaryName, pkg string, tags []string) {
 		binary = fmt.Sprintf("./bin/%s", binaryName)
 	}
 
-	if goos == "windows" {
+	if goos == windows {
 		binary += ".exe"
 	}
 
@@ -484,11 +489,11 @@ func clean() {
 
 func setBuildEnv() {
 	os.Setenv("GOOS", goos)
-	if goos == "windows" {
+	if goos == windows {
 		// require windows >=7
 		os.Setenv("CGO_CFLAGS", "-D_WIN32_WINNT=0x0601")
 	}
-	if goarch != "amd64" || goos != "linux" {
+	if goarch != "amd64" || goos != linux {
 		// needed for all other archs
 		cgo = true
 	}

+ 4 - 0
conf/defaults.ini

@@ -474,6 +474,10 @@ error_or_timeout = alerting
 # Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
 nodata_or_nullvalues = no_data
 
+# Alert notifications can include images, but rendering many images at the same time can overload the server
+# This limit will protect the server from render overloading and make sure notifications are sent out quickly
+concurrent_render_limit = 5
+
 #################################### Explore #############################
 [explore]
 # Enable the Explore section

+ 5 - 1
conf/sample.ini

@@ -393,6 +393,10 @@ log_queries =
 # Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
 ;nodata_or_nullvalues = no_data
 
+# Alert notifications can include images, but rendering many images at the same time can overload the server
+# This limit will protect the server from render overloading and make sure notifications are sent out quickly
+;concurrent_render_limit = 5
+
 #################################### Explore #############################
 [explore]
 # Enable the Explore section
@@ -431,7 +435,7 @@ log_queries =
 ;sampler_param = 1
 
 #################################### Grafana.com integration  ##########################
-# Url used to to import dashboards directly from Grafana.com
+# Url used to import dashboards directly from Grafana.com
 [grafana_com]
 ;url = https://grafana.com
 

+ 9 - 0
devenv/bulk_alerting_dashboards/bulk_alerting_dashboards.yaml

@@ -0,0 +1,9 @@
+apiVersion: 1
+
+providers:
+ - name: 'Bulk alerting dashboards'
+   folder: 'Bulk alerting dashboards'
+   type: file
+   options:
+     path: devenv/bulk_alerting_dashboards
+

+ 168 - 0
devenv/bulk_alerting_dashboards/bulkdash_alerting.jsonnet

@@ -0,0 +1,168 @@
+{
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": null,
+  "links": [],
+  "panels": [
+    {
+      "alert": {
+        "conditions": [
+          {
+            "evaluator": {
+              "params": [
+                65
+              ],
+              "type": "gt"
+            },
+            "operator": {
+              "type": "and"
+            },
+            "query": {
+              "params": [
+                "A",
+                "5m",
+                "now"
+              ]
+            },
+            "reducer": {
+              "params": [],
+              "type": "avg"
+            },
+            "type": "query"
+          }
+        ],
+        "executionErrorState": "alerting",
+        "frequency": "10s",
+        "handler": 1,
+        "name": "bulk alerting",
+        "noDataState": "no_data",
+        "notifications": []
+      },
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "gdev-prometheus",
+      "fill": 1,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 0
+      },
+      "id": 2,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "$$hashKey": "object:117",
+          "expr": "go_goroutines",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "refId": "A"
+        }
+      ],
+      "thresholds": [
+        {
+          "colorMode": "critical",
+          "fill": true,
+          "line": true,
+          "op": "gt",
+          "value": 50
+        }
+      ],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Panel Title",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    }
+  ],
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "New dashboard",
+  "uid": null,
+  "version": 0
+}

+ 1 - 1
devenv/docker/blocks/graphite1/conf/opt/graphite/conf/aggregation-rules.conf

@@ -8,7 +8,7 @@
 # 'avg'. The name of the aggregate metric will be derived from
 # 'output_template' filling in any captured fields from 'input_pattern'.
 #
-# For example, if you're metric naming scheme is:
+# For example, if your metric naming scheme is:
 #
 #   <env>.applications.<app>.<server>.<metric>
 #

+ 1 - 0
devenv/docker/ha_test/.gitignore

@@ -0,0 +1 @@
+grafana/provisioning/dashboards/alerts/alert-*

+ 137 - 0
devenv/docker/ha_test/README.md

@@ -0,0 +1,137 @@
+# Grafana High Availability (HA) test setup
+
+A set of docker compose services which together create a Grafana HA test setup with the capability of easily
+scaling the number of Grafana instances up/down.
+
+Included services
+
+* Grafana
+* Mysql - Grafana configuration database and session storage
+* Prometheus - Monitoring of Grafana and used as datasource of provisioned alert rules
+* Nginx - Reverse proxy for Grafana and Prometheus. Enables browsing Grafana/Prometheus UI using a hostname
+
+## Prerequisites
+
+### Build grafana docker container
+
+Build a Grafana docker container from current branch and commit and tag it as grafana/grafana:dev.
+
+```bash
+$ cd <grafana repo>
+$ make build-docker-full
+```
+
+### Virtual host names
+
+#### Alternative 1 - Use dnsmasq
+
+```bash
+$ sudo apt-get install dnsmasq
+$ echo 'address=/loc/127.0.0.1' | sudo tee /etc/dnsmasq.d/dnsmasq-loc.conf > /dev/null
+$ sudo /etc/init.d/dnsmasq restart
+$ ping whatever.loc
+PING whatever.loc (127.0.0.1) 56(84) bytes of data.
+64 bytes from localhost (127.0.0.1): icmp_seq=1 ttl=64 time=0.076 ms
+--- whatever.loc ping statistics ---
+1 packet transmitted, 1 received, 0% packet loss, time 1998ms
+```
+
+#### Alternative 2 - Manually update /etc/hosts
+
+Update your `/etc/hosts` to be able to access Grafana and/or Prometheus UI using a hostname.
+
+```bash
+$ cat /etc/hosts
+127.0.0.1       grafana.loc
+127.0.0.1       prometheus.loc
+```
+
+## Start services
+
+```bash
+$ docker-compose up -d
+```
+
+Browse
+* http://grafana.loc/
+* http://prometheus.loc/
+
+Check for any errors
+
+```bash
+$ docker-compose logs | grep error
+```
+
+### Scale Grafana instances up/down
+
+Scale the number of Grafana instances to `<instances>`
+
+```bash
+$ docker-compose up --scale grafana=<instances> -d
+# for example 3 instances
+$ docker-compose up --scale grafana=3 -d
+```
+
+## Test alerting
+
+### Create notification channels
+
+Creates the default notification channels if they do not already exist
+
+```bash
+$ ./alerts.sh setup
+```
+
+### Slack notifications
+
+Disable
+
+```bash
+$ ./alerts.sh slack -d
+```
+
+Enable and configure url
+
+```bash
+$ ./alerts.sh slack -u https://hooks.slack.com/services/...
+```
+
+Enable, configure url and enable reminders
+
+```bash
+$ ./alerts.sh slack -u https://hooks.slack.com/services/... -r -e 10m
+```
+
+### Provision alert dashboards with alert rules
+
+Provision 1 dashboard/alert rule (default)
+
+```bash
+$ ./alerts.sh provision
+```
+
+Provision 10 dashboards/alert rules
+
+```bash
+$ ./alerts.sh provision -a 10
+```
+
+Provision 10 dashboards/alert rules and change condition to `gt > 100`
+
+```bash
+$ ./alerts.sh provision -a 10 -c 100
+```
+
+### Pause/unpause all alert rules
+
+Pause
+
+```bash
+$ ./alerts.sh pause
+```
+
+Unpause
+
+```bash
+$ ./alerts.sh unpause
+```

+ 156 - 0
devenv/docker/ha_test/alerts.sh

@@ -0,0 +1,156 @@
+#!/bin/bash
+
+requiresJsonnet() {
+		if ! type "jsonnet" > /dev/null; then
+				echo "you need you install jsonnet to run this script"
+				echo "follow the instructions on https://github.com/google/jsonnet"
+				exit 1
+		fi
+}
+
+setup() {
+	STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://admin:admin@grafana.loc/api/alert-notifications/1)
+  if [ $STATUS -eq 200 ]; then
+    echo "Email already exists, skipping..."
+  else
+		curl -H "Content-Type: application/json" \
+		-d '{
+			"name": "Email",
+			"type":  "email",
+			"isDefault": false,
+			"sendReminder": false,
+			"uploadImage": true,
+			"settings": {
+				"addresses": "user@test.com"
+			}
+		}' \
+		http://admin:admin@grafana.loc/api/alert-notifications
+  fi
+
+	STATUS=$(curl -s -o /dev/null -w '%{http_code}' http://admin:admin@grafana.loc/api/alert-notifications/2)
+  if [ $STATUS -eq 200 ]; then
+    echo "Slack already exists, skipping..."
+  else
+		curl -H "Content-Type: application/json" \
+		-d '{
+			"name": "Slack",
+			"type":  "slack",
+			"isDefault": false,
+			"sendReminder": false,
+			"uploadImage": true
+		}' \
+		http://admin:admin@grafana.loc/api/alert-notifications
+  fi
+}
+
+slack() {
+	enabled=true
+	url=''
+	remind=false
+	remindEvery='10m'
+
+	while getopts ":e:u:dr" o; do
+    case "${o}" in
+				e)
+            remindEvery=${OPTARG}
+            ;;
+				u)
+            url=${OPTARG}
+            ;;
+				d)
+            enabled=false
+            ;;
+				r)
+            remind=true
+            ;;
+    esac
+	done
+	shift $((OPTIND-1))
+
+	curl -X PUT \
+		-H "Content-Type: application/json" \
+		-d '{
+			"id": 2,
+			"name": "Slack",
+			"type":  "slack",
+			"isDefault": '$enabled',
+			"sendReminder": '$remind',
+			"frequency": "'$remindEvery'",
+			"uploadImage": true,
+			"settings": {
+				"url": "'$url'"
+			}
+		}' \
+		http://admin:admin@grafana.loc/api/alert-notifications/2
+}
+
+provision() {
+	alerts=1
+	condition=65
+	while getopts ":a:c:" o; do
+    case "${o}" in
+        a)
+            alerts=${OPTARG}
+            ;;
+				c)
+            condition=${OPTARG}
+            ;;
+    esac
+	done
+	shift $((OPTIND-1))
+
+	requiresJsonnet
+
+	rm -rf grafana/provisioning/dashboards/alerts/alert-*.json
+	jsonnet -m grafana/provisioning/dashboards/alerts grafana/provisioning/alerts.jsonnet --ext-code alerts=$alerts --ext-code condition=$condition
+}
+
+pause() {
+	curl -H "Content-Type: application/json" \
+  -d '{"paused":true}' \
+  http://admin:admin@grafana.loc/api/admin/pause-all-alerts
+}
+
+unpause() {
+	curl -H "Content-Type: application/json" \
+  -d '{"paused":false}' \
+  http://admin:admin@grafana.loc/api/admin/pause-all-alerts
+}
+
+usage() {
+	echo -e "Usage: ./alerts.sh COMMAND [OPTIONS]\n"
+	echo -e "Commands"
+	echo -e "  setup\t\t creates default alert notification channels"
+	echo -e "  slack\t\t configure slack notification channel"
+	echo -e "    [-d]\t\t\t disable notifier, default enabled"
+	echo -e "    [-u]\t\t\t url"
+	echo -e "    [-r]\t\t\t send reminders"
+	echo -e "    [-e <remind every>]\t\t default 10m\n"
+	echo -e "  provision\t provision alerts"
+	echo -e "    [-a <alert rule count>]\t default 1"
+	echo -e "    [-c <condition value>]\t default 65\n"
+	echo -e "  pause\t\t pause all alerts"
+	echo -e "  unpause\t unpause all alerts"
+}
+
+main() {
+	local cmd=$1
+
+	if [[ $cmd == "setup" ]]; then
+		setup
+	elif [[ $cmd == "slack" ]]; then
+		slack "${@:2}"
+	elif [[ $cmd == "provision" ]]; then
+		provision "${@:2}"
+	elif [[ $cmd == "pause" ]]; then
+		pause
+	elif [[ $cmd == "unpause" ]]; then
+		unpause
+	fi
+
+  if [[ -z "$cmd" ]]; then
+		usage
+	fi
+}
+
+main "$@"

+ 78 - 0
devenv/docker/ha_test/docker-compose.yaml

@@ -0,0 +1,78 @@
+version: "2.1"
+
+services:
+  nginx-proxy:
+    image: jwilder/nginx-proxy
+    ports:
+      - "80:80"
+    volumes:
+      - /var/run/docker.sock:/tmp/docker.sock:ro
+
+  db:
+    image: mysql
+    environment:
+      MYSQL_ROOT_PASSWORD: rootpass
+      MYSQL_DATABASE: grafana
+      MYSQL_USER: grafana
+      MYSQL_PASSWORD: password
+    ports:
+      - 3306
+    healthcheck:
+      test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"]
+      timeout: 10s
+      retries: 10
+
+  # db:
+  #   image: postgres:9.3
+  #   environment:
+  #     POSTGRES_DATABASE: grafana
+  #     POSTGRES_USER: grafana
+  #     POSTGRES_PASSWORD: password
+  #   ports:
+  #     - 5432
+  #   healthcheck:
+  #     test: ["CMD-SHELL", "pg_isready -d grafana -U grafana"]
+  #     timeout: 10s
+  #     retries: 10
+
+  grafana:
+    image: grafana/grafana:dev
+    volumes:
+      - ./grafana/provisioning/:/etc/grafana/provisioning/
+    environment:
+      - VIRTUAL_HOST=grafana.loc
+      - GF_SERVER_ROOT_URL=http://grafana.loc
+      - GF_DATABASE_NAME=grafana
+      - GF_DATABASE_USER=grafana
+      - GF_DATABASE_PASSWORD=password
+      - GF_DATABASE_TYPE=mysql
+      - GF_DATABASE_HOST=db:3306
+      - GF_SESSION_PROVIDER=mysql
+      - GF_SESSION_PROVIDER_CONFIG=grafana:password@tcp(db:3306)/grafana?allowNativePasswords=true
+      # - GF_DATABASE_TYPE=postgres
+      # - GF_DATABASE_HOST=db:5432
+      # - GF_DATABASE_SSL_MODE=disable
+      # - GF_SESSION_PROVIDER=postgres
+      # - GF_SESSION_PROVIDER_CONFIG=user=grafana password=password host=db port=5432 dbname=grafana sslmode=disable
+      - GF_LOG_FILTERS=alerting.notifier:debug,alerting.notifier.slack:debug
+    ports:
+      - 3000
+    depends_on:
+      db:
+        condition: service_healthy
+
+  prometheus:
+    image: prom/prometheus:v2.4.2
+    volumes:
+      - ./prometheus/:/etc/prometheus/
+    environment:
+      - VIRTUAL_HOST=prometheus.loc
+    ports:
+      - 9090
+
+  # mysqld-exporter:
+  #   image: prom/mysqld-exporter
+  #   environment:
+  #     - DATA_SOURCE_NAME=grafana:password@(mysql:3306)/
+  #   ports:
+  #     - 9104

+ 202 - 0
devenv/docker/ha_test/grafana/provisioning/alerts.jsonnet

@@ -0,0 +1,202 @@
+local numAlerts = std.extVar('alerts');
+local condition = std.extVar('condition');
+local arr = std.range(1, numAlerts);
+
+local alertDashboardTemplate = {
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": null,
+  "links": [],
+  "panels": [
+    {
+      "alert": {
+        "conditions": [
+          {
+            "evaluator": {
+              "params": [
+                65
+              ],
+              "type": "gt"
+            },
+            "operator": {
+              "type": "and"
+            },
+            "query": {
+              "params": [
+                "A",
+                "5m",
+                "now"
+              ]
+            },
+            "reducer": {
+              "params": [],
+              "type": "avg"
+            },
+            "type": "query"
+          }
+        ],
+        "executionErrorState": "alerting",
+        "frequency": "10s",
+        "handler": 1,
+        "name": "bulk alerting",
+        "noDataState": "no_data",
+        "notifications": [
+          {
+            "id": 2
+          }
+        ]
+      },
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "Prometheus",
+      "fill": 1,
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 0,
+        "y": 0
+      },
+      "id": 2,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "$$hashKey": "object:117",
+          "expr": "go_goroutines",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "refId": "A"
+        }
+      ],
+      "thresholds": [
+        {
+          "colorMode": "critical",
+          "fill": true,
+          "line": true,
+          "op": "gt",
+          "value": 50
+        }
+      ],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Panel Title",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    }
+  ],
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "New dashboard",
+  "uid": null,
+  "version": 0
+};
+
+
+{
+  ['alert-' + std.toString(x) + '.json']:
+    alertDashboardTemplate + {
+      panels: [
+        alertDashboardTemplate.panels[0] +
+        {
+          alert+: {
+            name: 'Alert rule ' + x,
+            conditions: [
+              alertDashboardTemplate.panels[0].alert.conditions[0] +
+              {
+                evaluator+: {
+                  params: [condition]
+                }
+              },
+            ],
+          },
+        },
+      ],
+      uid: 'alert-' + x,
+      title: 'Alert ' + x
+    },
+      for x in arr
+}

+ 8 - 0
devenv/docker/ha_test/grafana/provisioning/dashboards/alerts.yaml

@@ -0,0 +1,8 @@
+apiVersion: 1
+
+providers:
+ - name: 'Alerts'
+   folder: 'Alerts'
+   type: file
+   options:
+     path: /etc/grafana/provisioning/dashboards/alerts

+ 172 - 0
devenv/docker/ha_test/grafana/provisioning/dashboards/alerts/overview.json

@@ -0,0 +1,172 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "-- Grafana --",
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "links": [],
+  "panels": [
+    {
+      "aliasColors": {
+        "Active alerts": "#bf1b00"
+      },
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": "Prometheus",
+      "fill": 1,
+      "gridPos": {
+        "h": 12,
+        "w": 24,
+        "x": 0,
+        "y": 0
+      },
+      "id": 2,
+      "interval": "",
+      "legend": {
+        "alignAsTable": true,
+        "avg": false,
+        "current": true,
+        "max": false,
+        "min": false,
+        "rightSide": true,
+        "show": true,
+        "total": false,
+        "values": true
+      },
+      "lines": true,
+      "linewidth": 2,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [
+        {
+          "alias": "Active grafana instances",
+          "dashes": true,
+          "fill": 0
+        }
+      ],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum(increase(grafana_alerting_notification_sent_total[1m])) by(job)",
+          "format": "time_series",
+          "instant": false,
+          "interval": "1m",
+          "intervalFactor": 1,
+          "legendFormat": "Notifications sent",
+          "refId": "A"
+        },
+        {
+          "expr": "min(grafana_alerting_active_alerts) without(instance)",
+          "format": "time_series",
+          "interval": "1m",
+          "intervalFactor": 1,
+          "legendFormat": "Active alerts",
+          "refId": "B"
+        },
+        {
+          "expr": "count(up{job=\"grafana\"})",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "Active grafana instances",
+          "refId": "C"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "Notifications sent vs active alerts",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": 3
+      }
+    }
+  ],
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-1h",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "Overview",
+  "uid": "xHy7-hAik",
+  "version": 6
+}

+ 11 - 0
devenv/docker/ha_test/grafana/provisioning/datasources/datasources.yaml

@@ -0,0 +1,11 @@
+apiVersion: 1
+
+datasources:
+  - name: Prometheus
+    type: prometheus
+    access: proxy
+    url: http://prometheus:9090
+    jsonData:
+      timeInterval: 10s
+      queryTimeout: 30s
+      httpMethod: POST

+ 39 - 0
devenv/docker/ha_test/prometheus/prometheus.yml

@@ -0,0 +1,39 @@
+# my global config
+global:
+  scrape_interval:     10s # Scrape targets every 10 seconds.
+  evaluation_interval: 10s # Evaluate rules every 10 seconds.
+  # scrape_timeout is set to the global default (10s).
+
+# Load and evaluate rules in this file every 'evaluation_interval' seconds.
+#rule_files:
+# - "alert.rules"
+# - "first.rules"
+# - "second.rules"
+
+# alerting:
+#   alertmanagers:
+#   - scheme: http
+#     static_configs:
+#     - targets:
+#       - "127.0.0.1:9093"
+
+scrape_configs:
+  - job_name: 'prometheus'
+    static_configs:
+      - targets: ['localhost:9090']
+
+  - job_name: 'grafana'
+    dns_sd_configs:
+      - names:
+        - 'grafana'
+        type: 'A'
+        port: 3000
+        refresh_interval: 10s
+
+  # - job_name: 'mysql'
+  #   dns_sd_configs:
+  #     - names:
+  #       - 'mysqld-exporter'
+  #       type: 'A'
+  #       port: 9104
+  #       refresh_interval: 10s

+ 21 - 4
devenv/setup.sh

@@ -11,7 +11,21 @@ bulkDashboard() {
 				let COUNTER=COUNTER+1
 		done
 
-		ln -s -f -r ./bulk-dashboards/bulk-dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
+		ln -s -f ../../../devenv/bulk-dashboards/bulk-dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
+}
+
+bulkAlertingDashboard() {
+
+		requiresJsonnet
+
+		COUNTER=0
+		MAX=100
+		while [  $COUNTER -lt $MAX ]; do
+				jsonnet -o "bulk_alerting_dashboards/alerting_dashboard${COUNTER}.json" -e "local bulkDash = import 'bulk_alerting_dashboards/bulkdash_alerting.jsonnet'; bulkDash + {  uid: 'bd-${COUNTER}',  title: 'alerting-title-${COUNTER}' }"
+				let COUNTER=COUNTER+1
+		done
+
+		ln -s -f ../../../devenv/bulk_alerting_dashboards/bulk_alerting_dashboards.yaml ../conf/provisioning/dashboards/custom.yaml
 }
 
 requiresJsonnet() {
@@ -36,8 +50,9 @@ devDatasources() {
 usage() {
 	echo -e "\n"
 	echo "Usage:"
-	echo "  bulk-dashboards                     - create and provisioning 400 dashboards"
-	echo "  no args                             - provisiong core datasources and dev dashboards"
+	echo "  bulk-dashboards                              - create and provision 400 dashboards"
+	echo "  bulk-alerting-dashboards                     - create and provision 400 dashboards with alerts"
+	echo "  no args                                      - provision core datasources and dev dashboards"
 }
 
 main() {
@@ -48,7 +63,9 @@ main() {
 
 	local cmd=$1
 
-	if [[ $cmd == "bulk-dashboards" ]]; then
+	if [[ $cmd == "bulk-alerting-dashboards" ]]; then
+		bulkAlertingDashboard
+	elif [[ $cmd == "bulk-dashboards" ]]; then
 		bulkDashboard
 	else
 		devDashboards

+ 1 - 1
docs/README.md

@@ -65,7 +65,7 @@ make docs-build
 
 This will rebuild the docs docker container. 
 
-To be able to use the image your have to quit  (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`, it will restart the docs server but now with access to your image. 
+To be able to use the image you have to quit (CTRL-C) the `make watch` command (that you run in the same directory as this README). Then simply rerun `make watch`, it will restart the docs server but now with access to your image.
 
 ### Editing content
 

+ 1 - 1
docs/sources/administration/provisioning.md

@@ -203,7 +203,7 @@ providers:
   folder: ''
   type: file
   disableDeletion: false
-  updateIntervalSeconds: 3 #how often Grafana will scan for changed dashboards
+  updateIntervalSeconds: 10 #how often Grafana will scan for changed dashboards
   options:
     path: /var/lib/grafana/dashboards
 ```

+ 1 - 0
docs/sources/auth/ldap.md

@@ -181,6 +181,7 @@ group_search_filter = "(member:1.2.840.113556.1.4.1941:=CN=%s,[user container/OU
 group_search_filter = "(|(member:1.2.840.113556.1.4.1941:=CN=%s,[user container/OU])(member:1.2.840.113556.1.4.1941:=CN=%s,[another user container/OU]))"
 group_search_filter_user_attribute = "cn"
 ```
+For more information on AD searches see [Microsoft's Search Filter Syntax](https://docs.microsoft.com/en-us/windows/desktop/adsi/search-filter-syntax) documentation.
 
 For troubleshooting, by changing `member_of` in `[servers.attributes]` to "dn" it will show you more accurate group memberships when [debug is enabled](#troubleshooting).
 

+ 171 - 0
docs/sources/features/datasources/stackdriver.md

@@ -0,0 +1,171 @@
++++
+title = "Using Stackdriver in Grafana"
+description = "Guide for using Stackdriver in Grafana"
+keywords = ["grafana", "stackdriver", "google", "guide"]
+type = "docs"
+aliases = ["/datasources/stackdriver"]
+[menu.docs]
+name = "Stackdriver"
+parent = "datasources"
+weight = 11
++++
+
+# Using Google Stackdriver in Grafana
+
+> Only available in Grafana v5.3+.
+> The datasource is currently a beta feature and is subject to change.
+
+Grafana ships with built-in support for Google Stackdriver. Just add it as a datasource and you are ready to build dashboards for your Stackdriver metrics.
+
+## Adding the data source to Grafana
+
+1. Open the side menu by clicking the Grafana icon in the top header.
+2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
+3. Click the `+ Add data source` button in the top header.
+4. Select `Stackdriver` from the *Type* dropdown.
+5. Upload or paste in the Service Account Key file. See below for steps on how to create a Service Account Key file.
+
+> NOTE: If you're not seeing the `Data Sources` link in your side menu it means that your current user does not have the `Admin` role for the current organization.
+
+| Name                  | Description                                                                         |
+| --------------------- | ----------------------------------------------------------------------------------- |
+| _Name_                | The datasource name. This is how you refer to the datasource in panels & queries.   |
+| _Default_             | Default datasource means that it will be pre-selected for new panels.               |
+| _Service Account Key_ | Service Account Key File for a GCP Project. Instructions below on how to create it. |
+
+## Authentication
+
+### Service Account Credentials - Private Key File
+
+To authenticate with the Stackdriver API, you need to create a Google Cloud Platform (GCP) Service Account for the Project you want to show data for. A Grafana datasource integrates with one GCP Project. If you want to visualize data from multiple GCP Projects then you need to create one datasource per GCP Project.
+
+#### Enable APIs
+
+The following APIs need to be enabled first:
+
+- [Monitoring API](https://console.cloud.google.com/apis/library/monitoring.googleapis.com)
+- [Cloud Resource Manager API](https://console.cloud.google.com/apis/library/cloudresourcemanager.googleapis.com)
+
+Click on the links above and click the `Enable` button:
+
+![Enable GCP APIs](/img/docs/v54/stackdriver_enable_api.png)
+
+#### Create a GCP Service Account for a Project
+
+1. Navigate to the [APIs & Services Credentials page](https://console.cloud.google.com/apis/credentials).
+2. Click on the `Create credentials` dropdown/button and choose the `Service account key` option.
+
+    ![Create service account button](/img/docs/v54/stackdriver_create_service_account_button.png)
+3. On the `Create service account key` page, choose key type `JSON`. Then in the `Service Account` dropdown, choose the `New service account` option:
+
+    ![Create service account key](/img/docs/v54/stackdriver_create_service_account_key.png)
+4. Some new fields will appear. Fill in a name for the service account in the `Service account name` field and then choose the `Monitoring Viewer` role from the `Role` dropdown:
+
+    ![Choose role](/img/docs/v54/stackdriver_service_account_choose_role.png)
+5. Click the Create button. A JSON key file will be created and downloaded to your computer. Store this file in a secure place as it allows access to your Stackdriver data.
+6. Upload it to Grafana on the datasource Configuration page. You can either upload the file or paste in the contents of the file.
+    
+    ![Choose role](/img/docs/v54/stackdriver_grafana_upload_key.png)
+7. The file contents will be encrypted and saved in the Grafana database. Don't forget to save after uploading the file!
+    
+    ![Choose role](/img/docs/v54/stackdriver_grafana_key_uploaded.png)
+
+## Metric Query Editor
+
+Choose a metric from the `Metric` dropdown.
+
+To add a filter, click the plus icon and choose a field to filter by and enter a filter value e.g. `instance_name = grafana-1`
+
+### Aggregation
+
+The aggregation field lets you combine time series based on common statistics. Read more about this option [here](https://cloud.google.com/monitoring/charts/metrics-selector#aggregation-options).
+
+The `Aligner` field allows you to align multiple time series after the same group by time interval. Read more about how it works [here](https://cloud.google.com/monitoring/charts/metrics-selector#alignment).
+
+#### Alignment Period/Group by Time
+
+The `Alignment Period` groups a metric by time if an aggregation is chosen. The default is to use the GCP Stackdriver default groupings (which allows you to compare graphs in Grafana with graphs in the Stackdriver UI).
+The option is called `Stackdriver auto` and the defaults are:
+
+- 1m for time ranges < 23 hours
+- 5m for time ranges >= 23 hours and < 6 days
+- 1h for time ranges >= 6 days
+
+The other automatic option is `Grafana auto`. This will automatically set the group by time depending on the time range chosen and the width of the graph panel. Read more about the details [here](http://docs.grafana.org/reference/templating/#the-interval-variable).
+
+It is also possible to choose fixed time intervals to group by, like `1h` or `1d`.
+
+### Group By
+
+Group by resource or metric labels to reduce the number of time series and to aggregate the results by a group by. E.g. Group by instance_name to see an aggregated metric for a Compute instance.
+
+### Alias Patterns
+
+The Alias By field allows you to control the format of the legend keys. The default is to show the metric name and labels. This can be long and hard to read. Using the following patterns in the alias field, you can format the legend key the way you want it.
+
+#### Metric Type Patterns
+
+Alias Pattern | Description | Example Result
+----------------- | ---------------------------- | -------------
+`{{metric.type}}` | returns the full Metric Type | `compute.googleapis.com/instance/cpu/utilization`
+`{{metric.name}}` | returns the metric name part | `instance/cpu/utilization`
+`{{metric.service}}` | returns the service part | `compute`
+
+#### Label Patterns
+
+In the Group By dropdown, you can see a list of metric and resource labels for a metric. These can be included in the legend key using alias patterns.
+
+Alias Pattern Format | Description | Alias Pattern Example | Example Result
+---------------------- | ---------------------------------- | ---------------------------- | -------------
+`{{metric.label.xxx}}` | returns the metric label value | `{{metric.label.instance_name}}` | `grafana-1-prod`
+`{{resource.label.xxx}}` | returns the resource label value | `{{resource.label.zone}}` | `us-east1-b`
+
+Example Alias By: `{{metric.type}} - {{metric.label.instance_name}}`
+
+Example Result: `compute.googleapis.com/instance/cpu/usage_time - server1-prod`
+
+## Templating
+
+Instead of hard-coding things like server, application and sensor name in your metric queries you can use variables in their place.
+Variables are shown as dropdown select boxes at the top of the dashboard. These dropdowns make it easy to change the data
+being displayed in your dashboard.
+
+Check out the [Templating]({{< relref "reference/templating.md" >}}) documentation for an introduction to the templating feature and the different
+types of template variables.
+
+### Query Variable
+
+Writing variable queries is not supported yet.
+
+### Using variables in queries
+
+There are two syntaxes:
+
+- `$<varname>`  Example: rate(http_requests_total{job=~"$job"}[5m])
+- `[[varname]]` Example: rate(http_requests_total{job=~"[[job]]"}[5m])
+
+Why two ways? The first syntax is easier to read and write but does not allow you to use a variable in the middle of a word. When the *Multi-value* or *Include all value* options are enabled, Grafana converts the labels from plain text to a regex compatible string, which means you have to use `=~` instead of `=`.
+
+## Annotations
+
+[Annotations]({{< relref "reference/annotations.md" >}}) allows you to overlay rich event information on top of graphs. You add annotation
+queries via the Dashboard menu / Annotations view.
+
+## Configure the Datasource with Provisioning
+
+It's now possible to configure datasources using config files with Grafana's provisioning system. You can read more about how it works and all the settings you can set for datasources on the [provisioning docs page](/administration/provisioning/#datasources)
+
+Here is a provisioning example for this datasource.
+
+```yaml
+apiVersion: 1
+
+datasources:
+  - name: Stackdriver
+    type: stackdriver
+    jsonData:
+      tokenUri: https://oauth2.googleapis.com/token
+      clientEmail: stackdriver@myproject.iam.gserviceaccount.com
+    secureJsonData:
+      privateKey: "<contents of your Service Account JWT Key file>"
+```

+ 1 - 1
docs/sources/guides/whats-new-in-v4-2.md

@@ -67,7 +67,7 @@ Making it possible to have users in multiple groups and have detailed access con
 
 ## Upgrade & Breaking changes
 
-If your using https in grafana we now force you to use tls 1.2 and the most secure ciphers.
+If you're using https in grafana we now force you to use tls 1.2 and the most secure ciphers.
 We think it's better to be secure by default rather than making it configurable.
 If you want to run https with lower versions of tls we suggest you put a reverse proxy in front of grafana.
 

+ 8 - 0
docs/sources/installation/configuration.md

@@ -566,3 +566,11 @@ Default setting for new alert rules. Defaults to categorize error and timeouts a
 > Available in 5.3  and above
 
 Default setting for how Grafana handles nodata or null values in alerting. (alerting, no_data, keep_state, ok)
+
+### concurrent_render_limit
+
+> Available in 5.3 and above
+
+Alert notifications can include images, but rendering many images at the same time can overload the server.
+This limit will protect the server from render overloading and make sure notifications are sent out quickly. Default
+value is `5`.

+ 1 - 1
docs/sources/tutorials/ha_setup.md

@@ -22,7 +22,7 @@ Setting up Grafana for high availability is fairly simple. It comes down to two
 
 First, you need to set up MySQL or Postgres on another server and configure Grafana to use that database.
 You can find the configuration for doing that in the [[database]]({{< relref "configuration.md" >}}#database) section in the grafana config.
-Grafana will now persist all long term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend finding an expert on for the database your using.
+Grafana will now persist all long term data in the database. How to configure the database for high availability is out of scope for this guide. We recommend finding an expert on the database you're using.
 
 ## User sessions
 

+ 1 - 0
docs/versions.json

@@ -1,4 +1,5 @@
 [
+  { "version": "v5.3", "path": "/v5.3", "archived": false, "current": false },
   { "version": "v5.2", "path": "/", "archived": false, "current": true },
   { "version": "v5.1", "path": "/v5.1", "archived": true },
   { "version": "v5.0", "path": "/v5.0", "archived": true },

+ 1 - 1
package.json

@@ -12,7 +12,7 @@
   "devDependencies": {
     "@types/d3": "^4.10.1",
     "@types/enzyme": "^3.1.13",
-    "@types/jest": "^21.1.4",
+    "@types/jest": "^23.3.2",
     "@types/node": "^8.0.31",
     "@types/react": "^16.4.14",
     "@types/react-custom-scrollbars": "^4.0.5",

+ 0 - 9
pkg/api/avatar/avatar.go

@@ -97,15 +97,6 @@ type CacheServer struct {
 	cache    *gocache.Cache
 }
 
-func (this *CacheServer) mustInt(r *http.Request, defaultValue int, keys ...string) (v int) {
-	for _, k := range keys {
-		if _, err := fmt.Sscanf(r.FormValue(k), "%d", &v); err == nil {
-			defaultValue = v
-		}
-	}
-	return defaultValue
-}
-
 func (this *CacheServer) Handler(ctx *macaron.Context) {
 	urlPath := ctx.Req.URL.Path
 	hash := urlPath[strings.LastIndex(urlPath, "/")+1:]

+ 7 - 3
pkg/api/dashboard.go

@@ -22,6 +22,10 @@ import (
 	"github.com/grafana/grafana/pkg/util"
 )
 
+const (
+	anonString = "Anonymous"
+)
+
 func isDashboardStarredByUser(c *m.ReqContext, dashID int64) (bool, error) {
 	if !c.IsSignedIn {
 		return false, nil
@@ -64,7 +68,7 @@ func GetDashboard(c *m.ReqContext) Response {
 	}
 
 	// Finding creator and last updater of the dashboard
-	updater, creator := "Anonymous", "Anonymous"
+	updater, creator := anonString, anonString
 	if dash.UpdatedBy > 0 {
 		updater = getUserLogin(dash.UpdatedBy)
 	}
@@ -128,7 +132,7 @@ func getUserLogin(userID int64) string {
 	query := m.GetUserByIdQuery{Id: userID}
 	err := bus.Dispatch(&query)
 	if err != nil {
-		return "Anonymous"
+		return anonString
 	}
 	return query.Result.Login
 }
@@ -403,7 +407,7 @@ func GetDashboardVersion(c *m.ReqContext) Response {
 		return Error(500, fmt.Sprintf("Dashboard version %d not found for dashboardId %d", query.Version, dashID), err)
 	}
 
-	creator := "Anonymous"
+	creator := anonString
 	if query.Result.CreatedBy > 0 {
 		creator = getUserLogin(query.Result.CreatedBy)
 	}

+ 1 - 1
pkg/api/folder.go

@@ -95,7 +95,7 @@ func toFolderDto(g guardian.DashboardGuardian, folder *m.Folder) dtos.Folder {
 	canAdmin, _ := g.CanAdmin()
 
 	// Finding creator and last updater of the folder
-	updater, creator := "Anonymous", "Anonymous"
+	updater, creator := anonString, anonString
 	if folder.CreatedBy > 0 {
 		creator = getUserLogin(folder.CreatedBy)
 	}

+ 0 - 10
pkg/api/folder_test.go

@@ -133,16 +133,6 @@ func TestFoldersApiEndpoint(t *testing.T) {
 	})
 }
 
-func callGetFolderByUID(sc *scenarioContext) {
-	sc.handlerFunc = GetFolderByUID
-	sc.fakeReqWithParams("GET", sc.url, map[string]string{}).exec()
-}
-
-func callDeleteFolder(sc *scenarioContext) {
-	sc.handlerFunc = DeleteFolder
-	sc.fakeReqWithParams("DELETE", sc.url, map[string]string{}).exec()
-}
-
 func callCreateFolder(sc *scenarioContext) {
 	sc.fakeReqWithParams("POST", sc.url, map[string]string{}).exec()
 }

+ 11 - 5
pkg/api/index.go

@@ -11,6 +11,12 @@ import (
 	"github.com/grafana/grafana/pkg/setting"
 )
 
+const (
+	// Themes
+	lightName = "light"
+	darkName  = "dark"
+)
+
 func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
 	settings, err := getFrontendSettingsMap(c)
 	if err != nil {
@@ -60,7 +66,7 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
 			OrgRole:                    c.OrgRole,
 			GravatarUrl:                dtos.GetGravatarUrl(c.Email),
 			IsGrafanaAdmin:             c.IsGrafanaAdmin,
-			LightTheme:                 prefs.Theme == "light",
+			LightTheme:                 prefs.Theme == lightName,
 			Timezone:                   prefs.Timezone,
 			Locale:                     locale,
 			HelpFlags1:                 c.HelpFlags1,
@@ -88,12 +94,12 @@ func setIndexViewData(c *m.ReqContext) (*dtos.IndexViewData, error) {
 	}
 
 	themeURLParam := c.Query("theme")
-	if themeURLParam == "light" {
+	if themeURLParam == lightName {
 		data.User.LightTheme = true
-		data.Theme = "light"
-	} else if themeURLParam == "dark" {
+		data.Theme = lightName
+	} else if themeURLParam == darkName {
 		data.User.LightTheme = false
-		data.Theme = "dark"
+		data.Theme = darkName
 	}
 
 	if hasEditPermissionInFoldersQuery.Result {

+ 0 - 3
pkg/api/live/hub.go

@@ -37,9 +37,6 @@ func newHub() *hub {
 	}
 }
 
-func (h *hub) removeConnection() {
-}
-
 func (h *hub) run(ctx context.Context) {
 	for {
 		select {

+ 171 - 0
pkg/api/pluginproxy/access_token_provider.go

@@ -0,0 +1,171 @@
+package pluginproxy
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"sync"
+	"time"
+
+	"golang.org/x/oauth2"
+
+	"github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/plugins"
+	"golang.org/x/oauth2/jwt"
+)
+
+var (
+	tokenCache = tokenCacheType{
+		cache: map[string]*jwtToken{},
+	}
+	oauthJwtTokenCache = oauthJwtTokenCacheType{
+		cache: map[string]*oauth2.Token{},
+	}
+)
+
+type tokenCacheType struct {
+	cache map[string]*jwtToken
+	sync.Mutex
+}
+
+type oauthJwtTokenCacheType struct {
+	cache map[string]*oauth2.Token
+	sync.Mutex
+}
+
+type accessTokenProvider struct {
+	route             *plugins.AppPluginRoute
+	datasourceId      int64
+	datasourceVersion int
+}
+
+type jwtToken struct {
+	ExpiresOn       time.Time `json:"-"`
+	ExpiresOnString string    `json:"expires_on"`
+	AccessToken     string    `json:"access_token"`
+}
+
+func newAccessTokenProvider(ds *models.DataSource, pluginRoute *plugins.AppPluginRoute) *accessTokenProvider {
+	return &accessTokenProvider{
+		datasourceId:      ds.Id,
+		datasourceVersion: ds.Version,
+		route:             pluginRoute,
+	}
+}
+
+func (provider *accessTokenProvider) getAccessToken(data templateData) (string, error) {
+	tokenCache.Lock()
+	defer tokenCache.Unlock()
+	if cachedToken, found := tokenCache.cache[provider.getAccessTokenCacheKey()]; found {
+		if cachedToken.ExpiresOn.After(time.Now().Add(time.Second * 10)) {
+			logger.Info("Using token from cache")
+			return cachedToken.AccessToken, nil
+		}
+	}
+
+	urlInterpolated, err := interpolateString(provider.route.TokenAuth.Url, data)
+	if err != nil {
+		return "", err
+	}
+
+	params := make(url.Values)
+	for key, value := range provider.route.TokenAuth.Params {
+		interpolatedParam, err := interpolateString(value, data)
+		if err != nil {
+			return "", err
+		}
+		params.Add(key, interpolatedParam)
+	}
+
+	getTokenReq, _ := http.NewRequest("POST", urlInterpolated, bytes.NewBufferString(params.Encode()))
+	getTokenReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
+	getTokenReq.Header.Add("Content-Length", strconv.Itoa(len(params.Encode())))
+
+	resp, err := client.Do(getTokenReq)
+	if err != nil {
+		return "", err
+	}
+
+	defer resp.Body.Close()
+
+	var token jwtToken
+	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
+		return "", err
+	}
+
+	expiresOnEpoch, _ := strconv.ParseInt(token.ExpiresOnString, 10, 64)
+	token.ExpiresOn = time.Unix(expiresOnEpoch, 0)
+	tokenCache.cache[provider.getAccessTokenCacheKey()] = &token
+
+	logger.Info("Got new access token", "ExpiresOn", token.ExpiresOn)
+
+	return token.AccessToken, nil
+}
+
+func (provider *accessTokenProvider) getJwtAccessToken(ctx context.Context, data templateData) (string, error) {
+	oauthJwtTokenCache.Lock()
+	defer oauthJwtTokenCache.Unlock()
+	if cachedToken, found := oauthJwtTokenCache.cache[provider.getAccessTokenCacheKey()]; found {
+		if cachedToken.Expiry.After(time.Now().Add(time.Second * 10)) {
+			logger.Debug("Using token from cache")
+			return cachedToken.AccessToken, nil
+		}
+	}
+
+	conf := &jwt.Config{}
+
+	if val, ok := provider.route.JwtTokenAuth.Params["client_email"]; ok {
+		interpolatedVal, err := interpolateString(val, data)
+		if err != nil {
+			return "", err
+		}
+		conf.Email = interpolatedVal
+	}
+
+	if val, ok := provider.route.JwtTokenAuth.Params["private_key"]; ok {
+		interpolatedVal, err := interpolateString(val, data)
+		if err != nil {
+			return "", err
+		}
+		conf.PrivateKey = []byte(interpolatedVal)
+	}
+
+	if val, ok := provider.route.JwtTokenAuth.Params["token_uri"]; ok {
+		interpolatedVal, err := interpolateString(val, data)
+		if err != nil {
+			return "", err
+		}
+		conf.TokenURL = interpolatedVal
+	}
+
+	conf.Scopes = provider.route.JwtTokenAuth.Scopes
+
+	token, err := getTokenSource(conf, ctx)
+	if err != nil {
+		return "", err
+	}
+
+	oauthJwtTokenCache.cache[provider.getAccessTokenCacheKey()] = token
+
+	logger.Info("Got new access token", "ExpiresOn", token.Expiry)
+
+	return token.AccessToken, nil
+}
+
+var getTokenSource = func(conf *jwt.Config, ctx context.Context) (*oauth2.Token, error) {
+	tokenSrc := conf.TokenSource(ctx)
+	token, err := tokenSrc.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	return token, nil
+}
+
+func (provider *accessTokenProvider) getAccessTokenCacheKey() string {
+	return fmt.Sprintf("%v_%v_%v_%v", provider.datasourceId, provider.datasourceVersion, provider.route.Path, provider.route.Method)
+}

+ 94 - 0
pkg/api/pluginproxy/access_token_provider_test.go

@@ -0,0 +1,94 @@
+package pluginproxy
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/plugins"
+	. "github.com/smartystreets/goconvey/convey"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+)
+
+func TestAccessToken(t *testing.T) {
+	Convey("Plugin with JWT token auth route", t, func() {
+		pluginRoute := &plugins.AppPluginRoute{
+			Path:   "pathwithjwttoken1",
+			Url:    "https://api.jwt.io/some/path",
+			Method: "GET",
+			JwtTokenAuth: &plugins.JwtTokenAuth{
+				Url: "https://login.server.com/{{.JsonData.tenantId}}/oauth2/token",
+				Scopes: []string{
+					"https://www.testapi.com/auth/monitoring.read",
+					"https://www.testapi.com/auth/cloudplatformprojects.readonly",
+				},
+				Params: map[string]string{
+					"token_uri":    "{{.JsonData.tokenUri}}",
+					"client_email": "{{.JsonData.clientEmail}}",
+					"private_key":  "{{.SecureJsonData.privateKey}}",
+				},
+			},
+		}
+
+		templateData := templateData{
+			JsonData: map[string]interface{}{
+				"clientEmail": "test@test.com",
+				"tokenUri":    "login.url.com/token",
+			},
+			SecureJsonData: map[string]string{
+				"privateKey": "testkey",
+			},
+		}
+
+		ds := &models.DataSource{Id: 1, Version: 2}
+
+		Convey("should fetch token using jwt private key", func() {
+			getTokenSource = func(conf *jwt.Config, ctx context.Context) (*oauth2.Token, error) {
+				return &oauth2.Token{AccessToken: "abc"}, nil
+			}
+			provider := newAccessTokenProvider(ds, pluginRoute)
+			token, err := provider.getJwtAccessToken(context.Background(), templateData)
+			So(err, ShouldBeNil)
+
+			So(token, ShouldEqual, "abc")
+		})
+
+		Convey("should set jwt config values", func() {
+			getTokenSource = func(conf *jwt.Config, ctx context.Context) (*oauth2.Token, error) {
+				So(conf.Email, ShouldEqual, "test@test.com")
+				So(conf.PrivateKey, ShouldResemble, []byte("testkey"))
+				So(len(conf.Scopes), ShouldEqual, 2)
+				So(conf.Scopes[0], ShouldEqual, "https://www.testapi.com/auth/monitoring.read")
+				So(conf.Scopes[1], ShouldEqual, "https://www.testapi.com/auth/cloudplatformprojects.readonly")
+				So(conf.TokenURL, ShouldEqual, "login.url.com/token")
+
+				return &oauth2.Token{AccessToken: "abc"}, nil
+			}
+
+			provider := newAccessTokenProvider(ds, pluginRoute)
+			_, err := provider.getJwtAccessToken(context.Background(), templateData)
+			So(err, ShouldBeNil)
+		})
+
+		Convey("should use cached token on second call", func() {
+			getTokenSource = func(conf *jwt.Config, ctx context.Context) (*oauth2.Token, error) {
+				return &oauth2.Token{
+					AccessToken: "abc",
+					Expiry:      time.Now().Add(1 * time.Minute)}, nil
+			}
+			provider := newAccessTokenProvider(ds, pluginRoute)
+			token1, err := provider.getJwtAccessToken(context.Background(), templateData)
+			So(err, ShouldBeNil)
+			So(token1, ShouldEqual, "abc")
+
+			getTokenSource = func(conf *jwt.Config, ctx context.Context) (*oauth2.Token, error) {
+				return &oauth2.Token{AccessToken: "error: cache not used"}, nil
+			}
+			token2, err := provider.getJwtAccessToken(context.Background(), templateData)
+			So(err, ShouldBeNil)
+			So(token2, ShouldEqual, "abc")
+		})
+	})
+}

+ 93 - 0
pkg/api/pluginproxy/ds_auth_provider.go

@@ -0,0 +1,93 @@
+package pluginproxy
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"text/template"
+
+	m "github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/plugins"
+	"github.com/grafana/grafana/pkg/util"
+)
+
+// ApplyRoute sets authentication and custom headers on the outgoing request based on the plugin route data.
+func ApplyRoute(ctx context.Context, req *http.Request, proxyPath string, route *plugins.AppPluginRoute, ds *m.DataSource) {
+	proxyPath = strings.TrimPrefix(proxyPath, route.Path)
+
+	data := templateData{
+		JsonData:       ds.JsonData.Interface().(map[string]interface{}),
+		SecureJsonData: ds.SecureJsonData.Decrypt(),
+	}
+
+	interpolatedURL, err := interpolateString(route.Url, data)
+	if err != nil {
+		logger.Error("Error interpolating proxy url", "error", err)
+		return
+	}
+
+	routeURL, err := url.Parse(interpolatedURL)
+	if err != nil {
+		logger.Error("Error parsing plugin route url", "error", err)
+		return
+	}
+
+	req.URL.Scheme = routeURL.Scheme
+	req.URL.Host = routeURL.Host
+	req.Host = routeURL.Host
+	req.URL.Path = util.JoinUrlFragments(routeURL.Path, proxyPath)
+
+	if err := addHeaders(&req.Header, route, data); err != nil {
+		logger.Error("Failed to render plugin headers", "error", err)
+	}
+
+	tokenProvider := newAccessTokenProvider(ds, route)
+
+	if route.TokenAuth != nil {
+		if token, err := tokenProvider.getAccessToken(data); err != nil {
+			logger.Error("Failed to get access token", "error", err)
+		} else {
+			req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
+		}
+	}
+
+	if route.JwtTokenAuth != nil {
+		if token, err := tokenProvider.getJwtAccessToken(ctx, data); err != nil {
+			logger.Error("Failed to get access token", "error", err)
+		} else {
+			req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
+		}
+	}
+	logger.Info("Requesting", "url", req.URL.String())
+
+}
+
+func interpolateString(text string, data templateData) (string, error) {
+	t, err := template.New("content").Parse(text)
+	if err != nil {
+		return "", fmt.Errorf("could not parse template %s", text)
+	}
+
+	var contentBuf bytes.Buffer
+	err = t.Execute(&contentBuf, data)
+	if err != nil {
+		return "", fmt.Errorf("failed to execute template %s", text)
+	}
+
+	return contentBuf.String(), nil
+}
+
+func addHeaders(reqHeaders *http.Header, route *plugins.AppPluginRoute, data templateData) error {
+	for _, header := range route.Headers {
+		interpolated, err := interpolateString(header.Content, data)
+		if err != nil {
+			return err
+		}
+		reqHeaders.Add(header.Name, interpolated)
+	}
+
+	return nil
+}

+ 21 - 0
pkg/api/pluginproxy/ds_auth_provider_test.go

@@ -0,0 +1,21 @@
+package pluginproxy
+
+import (
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestDsAuthProvider(t *testing.T) {
+	Convey("When interpolating string", t, func() {
+		data := templateData{
+			SecureJsonData: map[string]string{
+				"Test": "0asd+asd",
+			},
+		}
+
+		interpolated, err := interpolateString("{{.SecureJsonData.Test}}", data)
+		So(err, ShouldBeNil)
+		So(interpolated, ShouldEqual, "0asd+asd")
+	})
+}

+ 3 - 130
pkg/api/pluginproxy/ds_proxy.go

@@ -2,7 +2,6 @@ package pluginproxy
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -12,7 +11,6 @@ import (
 	"net/url"
 	"strconv"
 	"strings"
-	"text/template"
 	"time"
 
 	"github.com/opentracing/opentracing-go"
@@ -25,17 +23,10 @@ import (
 )
 
 var (
-	logger     = log.New("data-proxy-log")
-	tokenCache = map[string]*jwtToken{}
-	client     = newHTTPClient()
+	logger = log.New("data-proxy-log")
+	client = newHTTPClient()
 )
 
-type jwtToken struct {
-	ExpiresOn       time.Time `json:"-"`
-	ExpiresOnString string    `json:"expires_on"`
-	AccessToken     string    `json:"access_token"`
-}
-
 type DataSourceProxy struct {
 	ds        *m.DataSource
 	ctx       *m.ReqContext
@@ -162,7 +153,6 @@ func (proxy *DataSourceProxy) getDirector() func(req *http.Request) {
 		} else {
 			req.URL.Path = util.JoinUrlFragments(proxy.targetUrl.Path, proxy.proxyPath)
 		}
-
 		if proxy.ds.BasicAuth {
 			req.Header.Del("Authorization")
 			req.Header.Add("Authorization", util.GetBasicAuthHeader(proxy.ds.BasicAuthUser, proxy.ds.BasicAuthPassword))
@@ -219,7 +209,7 @@ func (proxy *DataSourceProxy) getDirector() func(req *http.Request) {
 		}
 
 		if proxy.route != nil {
-			proxy.applyRoute(req)
+			ApplyRoute(proxy.ctx.Req.Context(), req, proxy.proxyPath, proxy.route, proxy.ds)
 		}
 	}
 }
@@ -311,120 +301,3 @@ func checkWhiteList(c *m.ReqContext, host string) bool {
 
 	return true
 }
-
-func (proxy *DataSourceProxy) applyRoute(req *http.Request) {
-	proxy.proxyPath = strings.TrimPrefix(proxy.proxyPath, proxy.route.Path)
-
-	data := templateData{
-		JsonData:       proxy.ds.JsonData.Interface().(map[string]interface{}),
-		SecureJsonData: proxy.ds.SecureJsonData.Decrypt(),
-	}
-
-	interpolatedURL, err := interpolateString(proxy.route.Url, data)
-	if err != nil {
-		logger.Error("Error interpolating proxy url", "error", err)
-		return
-	}
-
-	routeURL, err := url.Parse(interpolatedURL)
-	if err != nil {
-		logger.Error("Error parsing plugin route url", "error", err)
-		return
-	}
-
-	req.URL.Scheme = routeURL.Scheme
-	req.URL.Host = routeURL.Host
-	req.Host = routeURL.Host
-	req.URL.Path = util.JoinUrlFragments(routeURL.Path, proxy.proxyPath)
-
-	if err := addHeaders(&req.Header, proxy.route, data); err != nil {
-		logger.Error("Failed to render plugin headers", "error", err)
-	}
-
-	if proxy.route.TokenAuth != nil {
-		if token, err := proxy.getAccessToken(data); err != nil {
-			logger.Error("Failed to get access token", "error", err)
-		} else {
-			req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
-		}
-	}
-
-	logger.Info("Requesting", "url", req.URL.String())
-}
-
-func (proxy *DataSourceProxy) getAccessToken(data templateData) (string, error) {
-	if cachedToken, found := tokenCache[proxy.getAccessTokenCacheKey()]; found {
-		if cachedToken.ExpiresOn.After(time.Now().Add(time.Second * 10)) {
-			logger.Info("Using token from cache")
-			return cachedToken.AccessToken, nil
-		}
-	}
-
-	urlInterpolated, err := interpolateString(proxy.route.TokenAuth.Url, data)
-	if err != nil {
-		return "", err
-	}
-
-	params := make(url.Values)
-	for key, value := range proxy.route.TokenAuth.Params {
-		interpolatedParam, err := interpolateString(value, data)
-		if err != nil {
-			return "", err
-		}
-		params.Add(key, interpolatedParam)
-	}
-
-	getTokenReq, _ := http.NewRequest("POST", urlInterpolated, bytes.NewBufferString(params.Encode()))
-	getTokenReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-	getTokenReq.Header.Add("Content-Length", strconv.Itoa(len(params.Encode())))
-
-	resp, err := client.Do(getTokenReq)
-	if err != nil {
-		return "", err
-	}
-
-	defer resp.Body.Close()
-
-	var token jwtToken
-	if err := json.NewDecoder(resp.Body).Decode(&token); err != nil {
-		return "", err
-	}
-
-	expiresOnEpoch, _ := strconv.ParseInt(token.ExpiresOnString, 10, 64)
-	token.ExpiresOn = time.Unix(expiresOnEpoch, 0)
-	tokenCache[proxy.getAccessTokenCacheKey()] = &token
-
-	logger.Info("Got new access token", "ExpiresOn", token.ExpiresOn)
-	return token.AccessToken, nil
-}
-
-func (proxy *DataSourceProxy) getAccessTokenCacheKey() string {
-	return fmt.Sprintf("%v_%v_%v", proxy.ds.Id, proxy.route.Path, proxy.route.Method)
-}
-
-func interpolateString(text string, data templateData) (string, error) {
-	t, err := template.New("content").Parse(text)
-	if err != nil {
-		return "", fmt.Errorf("could not parse template %s", text)
-	}
-
-	var contentBuf bytes.Buffer
-	err = t.Execute(&contentBuf, data)
-	if err != nil {
-		return "", fmt.Errorf("failed to execute template %s", text)
-	}
-
-	return contentBuf.String(), nil
-}
-
-func addHeaders(reqHeaders *http.Header, route *plugins.AppPluginRoute, data templateData) error {
-	for _, header := range route.Headers {
-		interpolated, err := interpolateString(header.Content, data)
-		if err != nil {
-			return err
-		}
-		reqHeaders.Add(header.Name, interpolated)
-	}
-
-	return nil
-}

+ 5 - 17
pkg/api/pluginproxy/ds_proxy_test.go

@@ -83,7 +83,7 @@ func TestDSRouteRule(t *testing.T) {
 			Convey("When matching route path", func() {
 				proxy := NewDataSourceProxy(ds, plugin, ctx, "api/v4/some/method")
 				proxy.route = plugin.Routes[0]
-				proxy.applyRoute(req)
+				ApplyRoute(proxy.ctx.Req.Context(), req, proxy.proxyPath, proxy.route, proxy.ds)
 
 				Convey("should add headers and update url", func() {
 					So(req.URL.String(), ShouldEqual, "https://www.google.com/some/method")
@@ -94,7 +94,7 @@ func TestDSRouteRule(t *testing.T) {
 			Convey("When matching route path and has dynamic url", func() {
 				proxy := NewDataSourceProxy(ds, plugin, ctx, "api/common/some/method")
 				proxy.route = plugin.Routes[3]
-				proxy.applyRoute(req)
+				ApplyRoute(proxy.ctx.Req.Context(), req, proxy.proxyPath, proxy.route, proxy.ds)
 
 				Convey("should add headers and interpolate the url", func() {
 					So(req.URL.String(), ShouldEqual, "https://dynamic.grafana.com/some/method")
@@ -188,7 +188,7 @@ func TestDSRouteRule(t *testing.T) {
 					client = newFakeHTTPClient(json)
 					proxy1 := NewDataSourceProxy(ds, plugin, ctx, "pathwithtoken1")
 					proxy1.route = plugin.Routes[0]
-					proxy1.applyRoute(req)
+					ApplyRoute(proxy1.ctx.Req.Context(), req, proxy1.proxyPath, proxy1.route, proxy1.ds)
 
 					authorizationHeaderCall1 = req.Header.Get("Authorization")
 					So(req.URL.String(), ShouldEqual, "https://api.nr1.io/some/path")
@@ -202,7 +202,7 @@ func TestDSRouteRule(t *testing.T) {
 						client = newFakeHTTPClient(json2)
 						proxy2 := NewDataSourceProxy(ds, plugin, ctx, "pathwithtoken2")
 						proxy2.route = plugin.Routes[1]
-						proxy2.applyRoute(req)
+						ApplyRoute(proxy2.ctx.Req.Context(), req, proxy2.proxyPath, proxy2.route, proxy2.ds)
 
 						authorizationHeaderCall2 = req.Header.Get("Authorization")
 
@@ -217,7 +217,7 @@ func TestDSRouteRule(t *testing.T) {
 							client = newFakeHTTPClient([]byte{})
 							proxy3 := NewDataSourceProxy(ds, plugin, ctx, "pathwithtoken1")
 							proxy3.route = plugin.Routes[0]
-							proxy3.applyRoute(req)
+							ApplyRoute(proxy3.ctx.Req.Context(), req, proxy3.proxyPath, proxy3.route, proxy3.ds)
 
 							authorizationHeaderCall3 := req.Header.Get("Authorization")
 							So(req.URL.String(), ShouldEqual, "https://api.nr1.io/some/path")
@@ -331,18 +331,6 @@ func TestDSRouteRule(t *testing.T) {
 			})
 		})
 
-		Convey("When interpolating string", func() {
-			data := templateData{
-				SecureJsonData: map[string]string{
-					"Test": "0asd+asd",
-				},
-			}
-
-			interpolated, err := interpolateString("{{.SecureJsonData.Test}}", data)
-			So(err, ShouldBeNil)
-			So(interpolated, ShouldEqual, "0asd+asd")
-		})
-
 		Convey("When proxying a data source with custom headers specified", func() {
 			plugin := &plugins.DataSourcePlugin{}
 

+ 10 - 9
pkg/api/render.go

@@ -41,15 +41,16 @@ func (hs *HTTPServer) RenderToPng(c *m.ReqContext) {
 	}
 
 	result, err := hs.RenderService.Render(c.Req.Context(), rendering.Opts{
-		Width:    width,
-		Height:   height,
-		Timeout:  time.Duration(timeout) * time.Second,
-		OrgId:    c.OrgId,
-		UserId:   c.UserId,
-		OrgRole:  c.OrgRole,
-		Path:     c.Params("*") + queryParams,
-		Timezone: queryReader.Get("tz", ""),
-		Encoding: queryReader.Get("encoding", ""),
+		Width:           width,
+		Height:          height,
+		Timeout:         time.Duration(timeout) * time.Second,
+		OrgId:           c.OrgId,
+		UserId:          c.UserId,
+		OrgRole:         c.OrgRole,
+		Path:            c.Params("*") + queryParams,
+		Timezone:        queryReader.Get("tz", ""),
+		Encoding:        queryReader.Get("encoding", ""),
+		ConcurrentLimit: 30,
 	})
 
 	if err != nil && err == rendering.ErrTimeout {

+ 2 - 0
pkg/cmd/grafana-cli/commands/commands.go

@@ -6,6 +6,7 @@ import (
 
 	"github.com/codegangsta/cli"
 	"github.com/fatih/color"
+	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/cmd/grafana-cli/logger"
 	"github.com/grafana/grafana/pkg/services/sqlstore"
 	"github.com/grafana/grafana/pkg/setting"
@@ -24,6 +25,7 @@ func runDbCommand(command func(commandLine CommandLine) error) func(context *cli
 
 		engine := &sqlstore.SqlStore{}
 		engine.Cfg = cfg
+		engine.Bus = bus.GetBus()
 		engine.Init()
 
 		if err := command(cmd); err != nil {

+ 1 - 1
pkg/cmd/grafana-cli/commands/install_command.go

@@ -112,7 +112,7 @@ func SelectVersion(plugin m.Plugin, version string) (m.Version, error) {
 		}
 	}
 
-	return m.Version{}, errors.New("Could not find the version your looking for")
+	return m.Version{}, errors.New("Could not find the version you're looking for")
 }
 
 func RemoveGitBuildFromName(pluginName, filename string) string {

+ 2 - 1
pkg/cmd/grafana-server/main.go

@@ -29,6 +29,7 @@ import (
 	_ "github.com/grafana/grafana/pkg/tsdb/opentsdb"
 	_ "github.com/grafana/grafana/pkg/tsdb/postgres"
 	_ "github.com/grafana/grafana/pkg/tsdb/prometheus"
+	_ "github.com/grafana/grafana/pkg/tsdb/stackdriver"
 	_ "github.com/grafana/grafana/pkg/tsdb/testdata"
 )
 
@@ -103,7 +104,7 @@ func listenToSystemSignals(server *GrafanaServerImpl) {
 
 	for {
 		select {
-		case _ = <-sighupChan:
+		case <-sighupChan:
 			log.Reload()
 		case sig := <-signalChan:
 			server.Shutdown(fmt.Sprintf("System signal: %s", sig))

+ 0 - 2
pkg/components/imguploader/azureblobuploader.go

@@ -127,8 +127,6 @@ type xmlError struct {
 const ms_date_layout = "Mon, 02 Jan 2006 15:04:05 GMT"
 const version = "2017-04-17"
 
-var client = &http.Client{}
-
 type StorageClient struct {
 	Auth      *Auth
 	Transport http.RoundTripper

+ 28 - 1
pkg/components/imguploader/s3uploader.go

@@ -2,12 +2,15 @@ package imguploader
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -50,7 +53,7 @@ func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string,
 				SecretAccessKey: u.secretKey,
 			}},
 			&credentials.EnvProvider{},
-			&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
+			remoteCredProvider(sess),
 		})
 	cfg := &aws.Config{
 		Region:      aws.String(u.region),
@@ -85,3 +88,27 @@ func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string,
 	}
 	return image_url, nil
 }
+
+func remoteCredProvider(sess *session.Session) credentials.Provider {
+	ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+
+	if len(ecsCredURI) > 0 {
+		return ecsCredProvider(sess, ecsCredURI)
+	}
+	return ec2RoleProvider(sess)
+}
+
+func ecsCredProvider(sess *session.Session, uri string) credentials.Provider {
+	const host = `169.254.170.2`
+
+	d := defaults.Get()
+	return endpointcreds.NewProviderClient(
+		*d.Config,
+		d.Handlers,
+		fmt.Sprintf("http://%s%s", host, uri),
+		func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute })
+}
+
+func ec2RoleProvider(sess *session.Session) credentials.Provider {
+	return &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute}
+}

+ 8 - 4
pkg/components/null/float.go

@@ -8,6 +8,10 @@ import (
 	"strconv"
 )
 
+const (
+	nullString = "null"
+)
+
 // Float is a nullable float64.
 // It does not consider zero values to be null.
 // It will decode to null, not zero, if null.
@@ -68,7 +72,7 @@ func (f *Float) UnmarshalJSON(data []byte) error {
 // It will return an error if the input is not an integer, blank, or "null".
 func (f *Float) UnmarshalText(text []byte) error {
 	str := string(text)
-	if str == "" || str == "null" {
+	if str == "" || str == nullString {
 		f.Valid = false
 		return nil
 	}
@@ -82,7 +86,7 @@ func (f *Float) UnmarshalText(text []byte) error {
 // It will encode null if this Float is null.
 func (f Float) MarshalJSON() ([]byte, error) {
 	if !f.Valid {
-		return []byte("null"), nil
+		return []byte(nullString), nil
 	}
 	return []byte(strconv.FormatFloat(f.Float64, 'f', -1, 64)), nil
 }
@@ -100,7 +104,7 @@ func (f Float) MarshalText() ([]byte, error) {
 // It will encode a blank string if this Float is null.
 func (f Float) String() string {
 	if !f.Valid {
-		return "null"
+		return nullString
 	}
 
 	return fmt.Sprintf("%1.3f", f.Float64)
@@ -109,7 +113,7 @@ func (f Float) String() string {
 // FullString returns float as string in full precision
 func (f Float) FullString() string {
 	if !f.Valid {
-		return "null"
+		return nullString
 	}
 
 	return fmt.Sprintf("%f", f.Float64)

+ 0 - 5
pkg/middleware/middleware_test.go

@@ -435,11 +435,6 @@ func (sc *scenarioContext) withValidApiKey() *scenarioContext {
 	return sc
 }
 
-func (sc *scenarioContext) withInvalidApiKey() *scenarioContext {
-	sc.apiKey = "nvalidhhhhds"
-	return sc
-}
-
 func (sc *scenarioContext) withAuthorizationHeader(authHeader string) *scenarioContext {
 	sc.authHeader = authHeader
 	return sc

+ 2 - 2
pkg/models/alert.go

@@ -75,7 +75,7 @@ type Alert struct {
 
 	EvalData     *simplejson.Json
 	NewStateDate time.Time
-	StateChanges int
+	StateChanges int64
 
 	Created time.Time
 	Updated time.Time
@@ -156,7 +156,7 @@ type SetAlertStateCommand struct {
 	Error    string
 	EvalData *simplejson.Json
 
-	Timestamp time.Time
+	Result Alert
 }
 
 //Queries

+ 33 - 22
pkg/models/alert_notifications.go

@@ -8,8 +8,18 @@ import (
 )
 
 var (
-	ErrNotificationFrequencyNotFound = errors.New("Notification frequency not specified")
-	ErrJournalingNotFound            = errors.New("alert notification journaling not found")
+	ErrNotificationFrequencyNotFound         = errors.New("Notification frequency not specified")
+	ErrAlertNotificationStateNotFound        = errors.New("alert notification state not found")
+	ErrAlertNotificationStateVersionConflict = errors.New("alert notification state update version conflict")
+	ErrAlertNotificationStateAlreadyExist    = errors.New("alert notification state already exists.")
+)
+
+type AlertNotificationStateType string
+
+var (
+	AlertNotificationStatePending   = AlertNotificationStateType("pending")
+	AlertNotificationStateCompleted = AlertNotificationStateType("completed")
+	AlertNotificationStateUnknown   = AlertNotificationStateType("unknown")
 )
 
 type AlertNotification struct {
@@ -76,33 +86,34 @@ type GetAllAlertNotificationsQuery struct {
 	Result []*AlertNotification
 }
 
-type AlertNotificationJournal struct {
-	Id         int64
-	OrgId      int64
-	AlertId    int64
-	NotifierId int64
-	SentAt     int64
-	Success    bool
+type AlertNotificationState struct {
+	Id                           int64
+	OrgId                        int64
+	AlertId                      int64
+	NotifierId                   int64
+	State                        AlertNotificationStateType
+	Version                      int64
+	UpdatedAt                    int64
+	AlertRuleStateUpdatedVersion int64
 }
 
-type RecordNotificationJournalCommand struct {
-	OrgId      int64
-	AlertId    int64
-	NotifierId int64
-	SentAt     int64
-	Success    bool
-}
+type SetAlertNotificationStateToPendingCommand struct {
+	Id                           int64
+	AlertRuleStateUpdatedVersion int64
+	Version                      int64
 
-type GetLatestNotificationQuery struct {
-	OrgId      int64
-	AlertId    int64
-	NotifierId int64
+	ResultVersion int64
+}
 
-	Result *AlertNotificationJournal
+type SetAlertNotificationStateToCompleteCommand struct {
+	Id      int64
+	Version int64
 }
 
-type CleanNotificationJournalCommand struct {
+type GetOrCreateNotificationStateQuery struct {
 	OrgId      int64
 	AlertId    int64
 	NotifierId int64
+
+	Result *AlertNotificationState
 }

+ 3 - 1
pkg/models/datasource.go

@@ -22,6 +22,7 @@ const (
 	DS_MSSQL         = "mssql"
 	DS_ACCESS_DIRECT = "direct"
 	DS_ACCESS_PROXY  = "proxy"
+	DS_STACKDRIVER   = "stackdriver"
 )
 
 var (
@@ -70,12 +71,12 @@ var knownDatasourcePlugins = map[string]bool{
 	DS_POSTGRES:                           true,
 	DS_MYSQL:                              true,
 	DS_MSSQL:                              true,
+	DS_STACKDRIVER:                        true,
 	"opennms":                             true,
 	"abhisant-druid-datasource":           true,
 	"dalmatinerdb-datasource":             true,
 	"gnocci":                              true,
 	"zabbix":                              true,
-	"alexanderzobnin-zabbix-datasource":   true,
 	"newrelic-app":                        true,
 	"grafana-datadog-datasource":          true,
 	"grafana-simple-json":                 true,
@@ -88,6 +89,7 @@ var knownDatasourcePlugins = map[string]bool{
 	"ayoungprogrammer-finance-datasource": true,
 	"monasca-datasource":                  true,
 	"vertamedia-clickhouse-datasource":    true,
+	"alexanderzobnin-zabbix-datasource":   true,
 }
 
 func IsKnownDataSourcePlugin(dsType string) bool {

+ 10 - 6
pkg/plugins/app_plugin.go

@@ -23,12 +23,13 @@ type AppPlugin struct {
 }
 
 type AppPluginRoute struct {
-	Path      string                 `json:"path"`
-	Method    string                 `json:"method"`
-	ReqRole   models.RoleType        `json:"reqRole"`
-	Url       string                 `json:"url"`
-	Headers   []AppPluginRouteHeader `json:"headers"`
-	TokenAuth *JwtTokenAuth          `json:"tokenAuth"`
+	Path         string                 `json:"path"`
+	Method       string                 `json:"method"`
+	ReqRole      models.RoleType        `json:"reqRole"`
+	Url          string                 `json:"url"`
+	Headers      []AppPluginRouteHeader `json:"headers"`
+	TokenAuth    *JwtTokenAuth          `json:"tokenAuth"`
+	JwtTokenAuth *JwtTokenAuth          `json:"jwtTokenAuth"`
 }
 
 type AppPluginRouteHeader struct {
@@ -36,8 +37,11 @@ type AppPluginRouteHeader struct {
 	Content string `json:"content"`
 }
 
+// JwtTokenAuth holds the configuration used for both regular token auth and
+// JWT token auth (where the credentials come from an uploaded JWT file).
 type JwtTokenAuth struct {
 	Url    string            `json:"url"`
+	Scopes []string          `json:"scopes"`
 	Params map[string]string `json:"params"`
 }
 

+ 12 - 5
pkg/services/alerting/interfaces.go

@@ -3,6 +3,8 @@ package alerting
 import (
 	"context"
 	"time"
+
+	"github.com/grafana/grafana/pkg/models"
 )
 
 type EvalHandler interface {
@@ -20,7 +22,7 @@ type Notifier interface {
 	NeedsImage() bool
 
 	// ShouldNotify checks this evaluation should send an alert notification
-	ShouldNotify(ctx context.Context, evalContext *EvalContext) bool
+	ShouldNotify(ctx context.Context, evalContext *EvalContext, notificationState *models.AlertNotificationState) bool
 
 	GetNotifierId() int64
 	GetIsDefault() bool
@@ -28,11 +30,16 @@ type Notifier interface {
 	GetFrequency() time.Duration
 }
 
-type NotifierSlice []Notifier
+type notifierState struct {
+	notifier Notifier
+	state    *models.AlertNotificationState
+}
+
+type notifierStateSlice []*notifierState
 
-func (notifiers NotifierSlice) ShouldUploadImage() bool {
-	for _, notifier := range notifiers {
-		if notifier.NeedsImage() {
+func (notifiers notifierStateSlice) ShouldUploadImage() bool {
+	for _, ns := range notifiers {
+		if ns.notifier.NeedsImage() {
 			return true
 		}
 	}

+ 77 - 44
pkg/services/alerting/notifier.go

@@ -1,16 +1,15 @@
 package alerting
 
 import (
-	"context"
 	"errors"
 	"fmt"
-	"time"
 
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/imguploader"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/services/rendering"
+	"github.com/grafana/grafana/pkg/setting"
 
 	m "github.com/grafana/grafana/pkg/models"
 )
@@ -40,61 +39,78 @@ type notificationService struct {
 }
 
 func (n *notificationService) SendIfNeeded(context *EvalContext) error {
-	notifiers, err := n.getNeededNotifiers(context.Rule.OrgId, context.Rule.Notifications, context)
+	notifierStates, err := n.getNeededNotifiers(context.Rule.OrgId, context.Rule.Notifications, context)
 	if err != nil {
 		return err
 	}
 
-	if len(notifiers) == 0 {
+	if len(notifierStates) == 0 {
 		return nil
 	}
 
-	if notifiers.ShouldUploadImage() {
+	if notifierStates.ShouldUploadImage() {
 		if err = n.uploadImage(context); err != nil {
 			n.log.Error("Failed to upload alert panel image.", "error", err)
 		}
 	}
 
-	return n.sendNotifications(context, notifiers)
+	return n.sendNotifications(context, notifierStates)
 }
 
-func (n *notificationService) sendNotifications(evalContext *EvalContext, notifiers []Notifier) error {
-	for _, notifier := range notifiers {
-		not := notifier
+func (n *notificationService) sendAndMarkAsComplete(evalContext *EvalContext, notifierState *notifierState) error {
+	notifier := notifierState.notifier
 
-		err := bus.InTransaction(evalContext.Ctx, func(ctx context.Context) error {
-			n.log.Debug("trying to send notification", "id", not.GetNotifierId())
+	n.log.Debug("Sending notification", "type", notifier.GetType(), "id", notifier.GetNotifierId(), "isDefault", notifier.GetIsDefault())
+	metrics.M_Alerting_Notification_Sent.WithLabelValues(notifier.GetType()).Inc()
 
-			// Verify that we can send the notification again
-			// but this time within the same transaction.
-			if !evalContext.IsTestRun && !not.ShouldNotify(context.Background(), evalContext) {
-				return nil
-			}
+	err := notifier.Notify(evalContext)
 
-			n.log.Debug("Sending notification", "type", not.GetType(), "id", not.GetNotifierId(), "isDefault", not.GetIsDefault())
-			metrics.M_Alerting_Notification_Sent.WithLabelValues(not.GetType()).Inc()
+	if err != nil {
+		n.log.Error("failed to send notification", "id", notifier.GetNotifierId(), "error", err)
+	}
 
-			//send notification
-			success := not.Notify(evalContext) == nil
+	if evalContext.IsTestRun {
+		return nil
+	}
 
-			if evalContext.IsTestRun {
-				return nil
-			}
+	cmd := &m.SetAlertNotificationStateToCompleteCommand{
+		Id:      notifierState.state.Id,
+		Version: notifierState.state.Version,
+	}
 
-			//write result to db.
-			cmd := &m.RecordNotificationJournalCommand{
-				OrgId:      evalContext.Rule.OrgId,
-				AlertId:    evalContext.Rule.Id,
-				NotifierId: not.GetNotifierId(),
-				SentAt:     time.Now().Unix(),
-				Success:    success,
-			}
+	return bus.DispatchCtx(evalContext.Ctx, cmd)
+}
 
-			return bus.DispatchCtx(ctx, cmd)
-		})
+func (n *notificationService) sendNotification(evalContext *EvalContext, notifierState *notifierState) error {
+	if !evalContext.IsTestRun {
+		setPendingCmd := &m.SetAlertNotificationStateToPendingCommand{
+			Id:                           notifierState.state.Id,
+			Version:                      notifierState.state.Version,
+			AlertRuleStateUpdatedVersion: evalContext.Rule.StateChanges,
+		}
+
+		err := bus.DispatchCtx(evalContext.Ctx, setPendingCmd)
+		if err == m.ErrAlertNotificationStateVersionConflict {
+			return nil
+		}
 
 		if err != nil {
-			n.log.Error("failed to send notification", "id", not.GetNotifierId())
+			return err
+		}
+
+		// We need to update state version to be able to log
+		// unexpected version conflicts when marking notifications as ok
+		notifierState.state.Version = setPendingCmd.ResultVersion
+	}
+
+	return n.sendAndMarkAsComplete(evalContext, notifierState)
+}
+
+func (n *notificationService) sendNotifications(evalContext *EvalContext, notifierStates notifierStateSlice) error {
+	for _, notifierState := range notifierStates {
+		err := n.sendNotification(evalContext, notifierState)
+		if err != nil {
+			n.log.Error("failed to send notification", "id", notifierState.notifier.GetNotifierId(), "error", err)
 		}
 	}
 
@@ -108,11 +124,12 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
 	}
 
 	renderOpts := rendering.Opts{
-		Width:   1000,
-		Height:  500,
-		Timeout: alertTimeout / 2,
-		OrgId:   context.Rule.OrgId,
-		OrgRole: m.ROLE_ADMIN,
+		Width:           1000,
+		Height:          500,
+		Timeout:         alertTimeout / 2,
+		OrgId:           context.Rule.OrgId,
+		OrgRole:         m.ROLE_ADMIN,
+		ConcurrentLimit: setting.AlertingRenderLimit,
 	}
 
 	ref, err := context.GetDashboardUID()
@@ -140,22 +157,38 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
 	return nil
 }
 
-func (n *notificationService) getNeededNotifiers(orgId int64, notificationIds []int64, evalContext *EvalContext) (NotifierSlice, error) {
+func (n *notificationService) getNeededNotifiers(orgId int64, notificationIds []int64, evalContext *EvalContext) (notifierStateSlice, error) {
 	query := &m.GetAlertNotificationsToSendQuery{OrgId: orgId, Ids: notificationIds}
 
 	if err := bus.Dispatch(query); err != nil {
 		return nil, err
 	}
 
-	var result []Notifier
+	var result notifierStateSlice
 	for _, notification := range query.Result {
 		not, err := n.createNotifierFor(notification)
 		if err != nil {
-			return nil, err
+			n.log.Error("Could not create notifier", "notifier", notification.Id, "error", err)
+			continue
+		}
+
+		query := &m.GetOrCreateNotificationStateQuery{
+			NotifierId: notification.Id,
+			AlertId:    evalContext.Rule.Id,
+			OrgId:      evalContext.Rule.OrgId,
+		}
+
+		err = bus.DispatchCtx(evalContext.Ctx, query)
+		if err != nil {
+			n.log.Error("Could not get notification state.", "notifier", notification.Id, "error", err)
+			continue
 		}
 
-		if not.ShouldNotify(evalContext.Ctx, evalContext) {
-			result = append(result, not)
+		if not.ShouldNotify(evalContext.Ctx, evalContext, query.Result) {
+			result = append(result, &notifierState{
+				notifier: not,
+				state:    query.Result,
+			})
 		}
 	}
 

+ 1 - 1
pkg/services/alerting/notifiers/alertmanager.go

@@ -46,7 +46,7 @@ type AlertmanagerNotifier struct {
 	log log.Logger
 }
 
-func (this *AlertmanagerNotifier) ShouldNotify(ctx context.Context, evalContext *alerting.EvalContext) bool {
+func (this *AlertmanagerNotifier) ShouldNotify(ctx context.Context, evalContext *alerting.EvalContext, notificationState *m.AlertNotificationState) bool {
 	this.log.Debug("Should notify", "ruleId", evalContext.Rule.Id, "state", evalContext.Rule.State, "previousState", evalContext.PrevAlertState)
 
 	// Do not notify when we become OK for the first time.

+ 28 - 33
pkg/services/alerting/notifiers/base.go

@@ -4,13 +4,16 @@ import (
 	"context"
 	"time"
 
-	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/models"
 
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 
+const (
+	triggMetrString = "Triggered metrics:\n\n"
+)
+
 type NotifierBase struct {
 	Name         string
 	Type         string
@@ -42,53 +45,45 @@ func NewNotifierBase(model *models.AlertNotification) NotifierBase {
 	}
 }
 
-func defaultShouldNotify(context *alerting.EvalContext, sendReminder bool, frequency time.Duration, lastNotify time.Time) bool {
+// ShouldNotify checks whether this evaluation should send an alert notification
+func (n *NotifierBase) ShouldNotify(ctx context.Context, context *alerting.EvalContext, notiferState *models.AlertNotificationState) bool {
 	// Only notify on state change.
-	if context.PrevAlertState == context.Rule.State && !sendReminder {
-		return false
-	}
-
-	// Do not notify if interval has not elapsed
-	if sendReminder && !lastNotify.IsZero() && lastNotify.Add(frequency).After(time.Now()) {
+	if context.PrevAlertState == context.Rule.State && !n.SendReminder {
 		return false
 	}
 
-	// Do not notify if alert state if OK or pending even on repeated notify
-	if sendReminder && (context.Rule.State == models.AlertStateOK || context.Rule.State == models.AlertStatePending) {
-		return false
+	if context.PrevAlertState == context.Rule.State && n.SendReminder {
+		// Do not notify if interval has not elapsed
+		lastNotify := time.Unix(notiferState.UpdatedAt, 0)
+		if notiferState.UpdatedAt != 0 && lastNotify.Add(n.Frequency).After(time.Now()) {
+			return false
+		}
+
+		// Do not notify if alert state is OK or pending even on repeated notify
+		if context.Rule.State == models.AlertStateOK || context.Rule.State == models.AlertStatePending {
+			return false
+		}
 	}
 
 	// Do not notify when we become OK for the first time.
-	if (context.PrevAlertState == models.AlertStatePending) && (context.Rule.State == models.AlertStateOK) {
+	if context.PrevAlertState == models.AlertStatePending && context.Rule.State == models.AlertStateOK {
 		return false
 	}
 
-	return true
-}
-
-// ShouldNotify checks this evaluation should send an alert notification
-func (n *NotifierBase) ShouldNotify(ctx context.Context, c *alerting.EvalContext) bool {
-	cmd := &models.GetLatestNotificationQuery{
-		OrgId:      c.Rule.OrgId,
-		AlertId:    c.Rule.Id,
-		NotifierId: n.Id,
-	}
-
-	err := bus.DispatchCtx(ctx, cmd)
-	if err == models.ErrJournalingNotFound {
-		return true
-	}
-
-	if err != nil {
-		n.log.Error("Could not determine last time alert notifier fired", "Alert name", c.Rule.Name, "Error", err)
+	// Do not notify when we OK -> Pending
+	if context.PrevAlertState == models.AlertStateOK && context.Rule.State == models.AlertStatePending {
 		return false
 	}
 
-	if !cmd.Result.Success {
-		return true
+	// Do not notify if the state is pending and it has been updated within the last minute
+	if notiferState.State == models.AlertNotificationStatePending {
+		lastUpdated := time.Unix(notiferState.UpdatedAt, 0)
+		if lastUpdated.Add(1 * time.Minute).After(time.Now()) {
+			return false
+		}
 	}
 
-	return defaultShouldNotify(c, n.SendReminder, n.Frequency, time.Unix(cmd.Result.SentAt, 0))
+	return true
 }
 
 func (n *NotifierBase) GetType() string {

+ 101 - 60
pkg/services/alerting/notifiers/base_test.go

@@ -2,12 +2,9 @@ package notifiers
 
 import (
 	"context"
-	"errors"
 	"testing"
 	"time"
 
-	"github.com/grafana/grafana/pkg/bus"
-
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
@@ -15,98 +12,142 @@ import (
 )
 
 func TestShouldSendAlertNotification(t *testing.T) {
+	tnow := time.Now()
+
 	tcs := []struct {
 		name         string
 		prevState    m.AlertStateType
 		newState     m.AlertStateType
-		expected     bool
 		sendReminder bool
+		frequency    time.Duration
+		state        *m.AlertNotificationState
+
+		expect bool
 	}{
 		{
-			name:      "pending -> ok should not trigger an notification",
-			newState:  m.AlertStatePending,
-			prevState: m.AlertStateOK,
-			expected:  false,
+			name:         "pending -> ok should not trigger an notification",
+			newState:     m.AlertStateOK,
+			prevState:    m.AlertStatePending,
+			sendReminder: false,
+			state:        &m.AlertNotificationState{},
+
+			expect: false,
 		},
 		{
-			name:      "ok -> alerting should trigger an notification",
-			newState:  m.AlertStateOK,
-			prevState: m.AlertStateAlerting,
-			expected:  true,
+			name:         "ok -> alerting should trigger an notification",
+			newState:     m.AlertStateAlerting,
+			prevState:    m.AlertStateOK,
+			sendReminder: false,
+			state:        &m.AlertNotificationState{},
+
+			expect: true,
 		},
 		{
-			name:      "ok -> pending should not trigger an notification",
-			newState:  m.AlertStateOK,
-			prevState: m.AlertStatePending,
-			expected:  false,
+			name:         "ok -> pending should not trigger an notification",
+			newState:     m.AlertStatePending,
+			prevState:    m.AlertStateOK,
+			sendReminder: false,
+			state:        &m.AlertNotificationState{},
+
+			expect: false,
 		},
 		{
 			name:         "ok -> ok should not trigger an notification",
 			newState:     m.AlertStateOK,
 			prevState:    m.AlertStateOK,
-			expected:     false,
 			sendReminder: false,
+			state:        &m.AlertNotificationState{},
+
+			expect: false,
 		},
 		{
-			name:         "ok -> alerting should not trigger an notification",
+			name:         "ok -> ok with reminder should not trigger an notification",
 			newState:     m.AlertStateOK,
-			prevState:    m.AlertStateAlerting,
-			expected:     true,
+			prevState:    m.AlertStateOK,
 			sendReminder: true,
+			state:        &m.AlertNotificationState{},
+
+			expect: false,
 		},
 		{
-			name:         "ok -> ok with reminder should not trigger an notification",
+			name:         "alerting -> ok should trigger an notification",
 			newState:     m.AlertStateOK,
-			prevState:    m.AlertStateOK,
-			expected:     false,
+			prevState:    m.AlertStateAlerting,
+			sendReminder: false,
+			state:        &m.AlertNotificationState{},
+
+			expect: true,
+		},
+		{
+			name:         "alerting -> ok should trigger an notification when reminders enabled",
+			newState:     m.AlertStateOK,
+			prevState:    m.AlertStateAlerting,
+			frequency:    time.Minute * 10,
 			sendReminder: true,
+			state:        &m.AlertNotificationState{UpdatedAt: tnow.Add(-time.Minute).Unix()},
+
+			expect: true,
 		},
-	}
+		{
+			name:         "alerting -> alerting with reminder and no state should trigger",
+			newState:     m.AlertStateAlerting,
+			prevState:    m.AlertStateAlerting,
+			frequency:    time.Minute * 10,
+			sendReminder: true,
+			state:        &m.AlertNotificationState{},
 
-	for _, tc := range tcs {
-		evalContext := alerting.NewEvalContext(context.TODO(), &alerting.Rule{
-			State: tc.newState,
-		})
+			expect: true,
+		},
+		{
+			name:         "alerting -> alerting with reminder and last notification sent 1 minute ago should not trigger",
+			newState:     m.AlertStateAlerting,
+			prevState:    m.AlertStateAlerting,
+			frequency:    time.Minute * 10,
+			sendReminder: true,
+			state:        &m.AlertNotificationState{UpdatedAt: tnow.Add(-time.Minute).Unix()},
 
-		evalContext.Rule.State = tc.prevState
-		if defaultShouldNotify(evalContext, true, 0, time.Now()) != tc.expected {
-			t.Errorf("failed %s. expected %+v to return %v", tc.name, tc, tc.expected)
-		}
-	}
-}
+			expect: false,
+		},
+		{
+			name:         "alerting -> alerting with reminder and last notification sent 11 minutes ago should trigger",
+			newState:     m.AlertStateAlerting,
+			prevState:    m.AlertStateAlerting,
+			frequency:    time.Minute * 10,
+			sendReminder: true,
+			state:        &m.AlertNotificationState{UpdatedAt: tnow.Add(-11 * time.Minute).Unix()},
 
-func TestShouldNotifyWhenNoJournalingIsFound(t *testing.T) {
-	Convey("base notifier", t, func() {
-		bus.ClearBusHandlers()
+			expect: true,
+		},
+		{
+			name:      "OK -> alerting with notification state pending and updated 30 seconds ago should not trigger",
+			newState:  m.AlertStateAlerting,
+			prevState: m.AlertStateOK,
+			state:     &m.AlertNotificationState{State: m.AlertNotificationStatePending, UpdatedAt: tnow.Add(-30 * time.Second).Unix()},
 
-		notifier := NewNotifierBase(&m.AlertNotification{
-			Id:       1,
-			Name:     "name",
-			Type:     "email",
-			Settings: simplejson.New(),
-		})
-		evalContext := alerting.NewEvalContext(context.TODO(), &alerting.Rule{})
+			expect: false,
+		},
+		{
+			name:      "OK -> alerting with notification state pending and updated 2 minutes ago should trigger",
+			newState:  m.AlertStateAlerting,
+			prevState: m.AlertStateOK,
+			state:     &m.AlertNotificationState{State: m.AlertNotificationStatePending, UpdatedAt: tnow.Add(-2 * time.Minute).Unix()},
 
-		Convey("should notify if no journaling is found", func() {
-			bus.AddHandlerCtx("", func(ctx context.Context, q *m.GetLatestNotificationQuery) error {
-				return m.ErrJournalingNotFound
-			})
+			expect: true,
+		},
+	}
 
-			if !notifier.ShouldNotify(context.Background(), evalContext) {
-				t.Errorf("should send notifications when ErrJournalingNotFound is returned")
-			}
+	for _, tc := range tcs {
+		evalContext := alerting.NewEvalContext(context.TODO(), &alerting.Rule{
+			State: tc.prevState,
 		})
 
-		Convey("should not notify query returns error", func() {
-			bus.AddHandlerCtx("", func(ctx context.Context, q *m.GetLatestNotificationQuery) error {
-				return errors.New("some kind of error unknown error")
-			})
+		evalContext.Rule.State = tc.newState
+		nb := &NotifierBase{SendReminder: tc.sendReminder, Frequency: tc.frequency}
 
-			if notifier.ShouldNotify(context.Background(), evalContext) {
-				t.Errorf("should not send notifications when query returns error")
-			}
-		})
-	})
+		if nb.ShouldNotify(evalContext.Ctx, evalContext, tc.state) != tc.expect {
+			t.Errorf("failed test %s.\n expected \n%+v \nto return: %v", tc.name, tc, tc.expect)
+		}
+	}
 }
 
 func TestBaseNotifier(t *testing.T) {

+ 1 - 1
pkg/services/alerting/notifiers/kafka.go

@@ -61,7 +61,7 @@ func (this *KafkaNotifier) Notify(evalContext *alerting.EvalContext) error {
 
 	state := evalContext.Rule.State
 
-	customData := "Triggered metrics:\n\n"
+	customData := triggMetrString
 	for _, evt := range evalContext.EvalMatches {
 		customData = customData + fmt.Sprintf("%s: %v\n", evt.Metric, evt.Value)
 	}

+ 1 - 1
pkg/services/alerting/notifiers/opsgenie.go

@@ -95,7 +95,7 @@ func (this *OpsGenieNotifier) createAlert(evalContext *alerting.EvalContext) err
 		return err
 	}
 
-	customData := "Triggered metrics:\n\n"
+	customData := triggMetrString
 	for _, evt := range evalContext.EvalMatches {
 		customData = customData + fmt.Sprintf("%s: %v\n", evt.Metric, evt.Value)
 	}

+ 1 - 1
pkg/services/alerting/notifiers/pagerduty.go

@@ -76,7 +76,7 @@ func (this *PagerdutyNotifier) Notify(evalContext *alerting.EvalContext) error {
 	if evalContext.Rule.State == m.AlertStateOK {
 		eventType = "resolve"
 	}
-	customData := "Triggered metrics:\n\n"
+	customData := triggMetrString
 	for _, evt := range evalContext.EvalMatches {
 		customData = customData + fmt.Sprintf("%s: %v\n", evt.Metric, evt.Value)
 	}

+ 6 - 13
pkg/services/alerting/result_handler.go

@@ -67,6 +67,12 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
 			}
 
 			handler.log.Error("Failed to save state", "error", err)
+		} else {
+
+			// StateChanges is used for de-duplicating alert notifications
+			// when two servers are racing. This makes sure that the server
+			// with the last state change always sends a notification.
+			evalContext.Rule.StateChanges = cmd.Result.StateChanges
 		}
 
 		// save annotation
@@ -88,19 +94,6 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
 		}
 	}
 
-	if evalContext.Rule.State == m.AlertStateOK && evalContext.PrevAlertState != m.AlertStateOK {
-		for _, notifierId := range evalContext.Rule.Notifications {
-			cmd := &m.CleanNotificationJournalCommand{
-				AlertId:    evalContext.Rule.Id,
-				NotifierId: notifierId,
-				OrgId:      evalContext.Rule.OrgId,
-			}
-			if err := bus.DispatchCtx(evalContext.Ctx, cmd); err != nil {
-				handler.log.Error("Failed to clean up old notification records", "notifier", notifierId, "alert", evalContext.Rule.Id, "Error", err)
-			}
-		}
-	}
 	handler.notifier.SendIfNeeded(evalContext)
-
 	return nil
 }

+ 3 - 0
pkg/services/alerting/rule.go

@@ -23,6 +23,8 @@ type Rule struct {
 	State               m.AlertStateType
 	Conditions          []Condition
 	Notifications       []int64
+
+	StateChanges int64
 }
 
 type ValidationError struct {
@@ -100,6 +102,7 @@ func NewRuleFromDBAlert(ruleDef *m.Alert) (*Rule, error) {
 	model.State = ruleDef.State
 	model.NoDataState = m.NoDataOption(ruleDef.Settings.Get("noDataState").MustString("no_data"))
 	model.ExecutionErrorState = m.ExecutionErrorOption(ruleDef.Settings.Get("executionErrorState").MustString("alerting"))
+	model.StateChanges = ruleDef.StateChanges
 
 	for _, v := range ruleDef.Settings.Get("notifications").MustArray() {
 		jsonModel := simplejson.NewFromAny(v)

+ 1 - 1
pkg/services/alerting/test_notification.go

@@ -39,7 +39,7 @@ func handleNotificationTestCommand(cmd *NotificationTestCommand) error {
 		return err
 	}
 
-	return notifier.sendNotifications(createTestEvalContext(cmd), []Notifier{notifiers})
+	return notifier.sendNotifications(createTestEvalContext(cmd), notifierStateSlice{{notifier: notifiers}})
 }
 
 func createTestEvalContext(cmd *NotificationTestCommand) *EvalContext {

+ 0 - 4
pkg/services/alerting/ticker.go

@@ -37,10 +37,6 @@ func NewTicker(last time.Time, initialOffset time.Duration, c clock.Clock) *Tick
 	return t
 }
 
-func (t *Ticker) updateOffset(offset time.Duration) {
-	t.newOffset <- offset
-}
-
 func (t *Ticker) run() {
 	for {
 		next := t.last.Add(time.Duration(1) * time.Second)

+ 0 - 6
pkg/services/notifications/notifications_test.go

@@ -9,12 +9,6 @@ import (
 	. "github.com/smartystreets/goconvey/convey"
 )
 
-type testTriggeredAlert struct {
-	ActualValue float64
-	Name        string
-	State       string
-}
-
 func TestNotifications(t *testing.T) {
 
 	Convey("Given the notifications service", t, func() {

+ 1 - 1
pkg/services/provisioning/dashboards/config_reader.go

@@ -83,7 +83,7 @@ func (cr *configReader) readConfig() ([]*DashboardsAsConfig, error) {
 		}
 
 		if dashboards[i].UpdateIntervalSeconds == 0 {
-			dashboards[i].UpdateIntervalSeconds = 3
+			dashboards[i].UpdateIntervalSeconds = 10
 		}
 	}
 

+ 2 - 2
pkg/services/provisioning/dashboards/config_reader_test.go

@@ -70,7 +70,7 @@ func validateDashboardAsConfig(t *testing.T, cfg []*DashboardsAsConfig) {
 	So(len(ds.Options), ShouldEqual, 1)
 	So(ds.Options["path"], ShouldEqual, "/var/lib/grafana/dashboards")
 	So(ds.DisableDeletion, ShouldBeTrue)
-	So(ds.UpdateIntervalSeconds, ShouldEqual, 10)
+	So(ds.UpdateIntervalSeconds, ShouldEqual, 15)
 
 	ds2 := cfg[1]
 	So(ds2.Name, ShouldEqual, "default")
@@ -81,5 +81,5 @@ func validateDashboardAsConfig(t *testing.T, cfg []*DashboardsAsConfig) {
 	So(len(ds2.Options), ShouldEqual, 1)
 	So(ds2.Options["path"], ShouldEqual, "/var/lib/grafana/dashboards")
 	So(ds2.DisableDeletion, ShouldBeFalse)
-	So(ds2.UpdateIntervalSeconds, ShouldEqual, 3)
+	So(ds2.UpdateIntervalSeconds, ShouldEqual, 10)
 }

+ 26 - 22
pkg/services/provisioning/dashboards/file_reader.go

@@ -43,26 +43,6 @@ func NewDashboardFileReader(cfg *DashboardsAsConfig, log log.Logger) (*fileReade
 		log.Warn("[Deprecated] The folder property is deprecated. Please use path instead.")
 	}
 
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Error("Cannot read directory", "error", err)
-	}
-
-	copy := path
-	path, err := filepath.Abs(path)
-	if err != nil {
-		log.Error("Could not create absolute path ", "path", path)
-	}
-
-	path, err = filepath.EvalSymlinks(path)
-	if err != nil {
-		log.Error("Failed to read content of symlinked path: %s", path)
-	}
-
-	if path == "" {
-		path = copy
-		log.Info("falling back to original path due to EvalSymlink/Abs failure")
-	}
-
 	return &fileReader{
 		Cfg:              cfg,
 		Path:             path,
@@ -99,7 +79,8 @@ func (fr *fileReader) ReadAndListen(ctx context.Context) error {
 }
 
 func (fr *fileReader) startWalkingDisk() error {
-	if _, err := os.Stat(fr.Path); err != nil {
+	resolvedPath := fr.resolvePath(fr.Path)
+	if _, err := os.Stat(resolvedPath); err != nil {
 		if os.IsNotExist(err) {
 			return err
 		}
@@ -116,7 +97,7 @@ func (fr *fileReader) startWalkingDisk() error {
 	}
 
 	filesFoundOnDisk := map[string]os.FileInfo{}
-	err = filepath.Walk(fr.Path, createWalkFn(filesFoundOnDisk))
+	err = filepath.Walk(resolvedPath, createWalkFn(filesFoundOnDisk))
 	if err != nil {
 		return err
 	}
@@ -344,6 +325,29 @@ func (fr *fileReader) readDashboardFromFile(path string, lastModified time.Time,
 	}, nil
 }
 
+func (fr *fileReader) resolvePath(path string) string {
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		fr.log.Error("Cannot read directory", "error", err)
+	}
+
+	copy := path
+	path, err := filepath.Abs(path)
+	if err != nil {
+		fr.log.Error("Could not create absolute path ", "path", path)
+	}
+
+	path, err = filepath.EvalSymlinks(path)
+	if err != nil {
+		fr.log.Error("Failed to read content of symlinked path: %s", path)
+	}
+
+	if path == "" {
+		path = copy
+		fr.log.Info("falling back to original path due to EvalSymlink/Abs failure")
+	}
+	return path
+}
+
 type provisioningMetadata struct {
 	uid   string
 	title string

+ 4 - 3
pkg/services/provisioning/dashboards/file_reader_linux_test.go

@@ -30,10 +30,11 @@ func TestProvsionedSymlinkedFolder(t *testing.T) {
 	want, err := filepath.Abs(containingId)
 
 	if err != nil {
-		t.Errorf("expected err to be nill")
+		t.Errorf("expected err to be nil")
 	}
 
-	if reader.Path != want {
-		t.Errorf("got %s want %s", reader.Path, want)
+	resolvedPath := reader.resolvePath(reader.Path)
+	if resolvedPath != want {
+		t.Errorf("got %s want %s", resolvedPath, want)
 	}
 }

+ 2 - 1
pkg/services/provisioning/dashboards/file_reader_test.go

@@ -67,7 +67,8 @@ func TestCreatingNewDashboardFileReader(t *testing.T) {
 			reader, err := NewDashboardFileReader(cfg, log.New("test-logger"))
 			So(err, ShouldBeNil)
 
-			So(filepath.IsAbs(reader.Path), ShouldBeTrue)
+			resolvedPath := reader.resolvePath(reader.Path)
+			So(filepath.IsAbs(resolvedPath), ShouldBeTrue)
 		})
 	})
 }

+ 1 - 1
pkg/services/provisioning/dashboards/testdata/test-configs/dashboards-from-disk/dev-dashboards.yaml

@@ -6,7 +6,7 @@ providers:
   folder: 'developers'
   editable: true
   disableDeletion: true
-  updateIntervalSeconds: 10
+  updateIntervalSeconds: 15
   type: file
   options:
     path: /var/lib/grafana/dashboards

+ 1 - 1
pkg/services/provisioning/dashboards/testdata/test-configs/version-0/version-0.yaml

@@ -3,7 +3,7 @@
   folder: 'developers'
   editable: true
   disableDeletion: true
-  updateIntervalSeconds: 10
+  updateIntervalSeconds: 15
   type: file
   options:
     path: /var/lib/grafana/dashboards

+ 10 - 9
pkg/services/rendering/interface.go

@@ -13,15 +13,16 @@ var ErrNoRenderer = errors.New("No renderer plugin found nor is an external rend
 var ErrPhantomJSNotInstalled = errors.New("PhantomJS executable not found")
 
 type Opts struct {
-	Width    int
-	Height   int
-	Timeout  time.Duration
-	OrgId    int64
-	UserId   int64
-	OrgRole  models.RoleType
-	Path     string
-	Encoding string
-	Timezone string
+	Width           int
+	Height          int
+	Timeout         time.Duration
+	OrgId           int64
+	UserId          int64
+	OrgRole         models.RoleType
+	Path            string
+	Encoding        string
+	Timezone        string
+	ConcurrentLimit int
 }
 
 type RenderResult struct {

+ 19 - 6
pkg/services/rendering/rendering.go

@@ -24,12 +24,13 @@ func init() {
 }
 
 type RenderingService struct {
-	log          log.Logger
-	pluginClient *plugin.Client
-	grpcPlugin   pluginModel.RendererPlugin
-	pluginInfo   *plugins.RendererPlugin
-	renderAction renderFunc
-	domain       string
+	log             log.Logger
+	pluginClient    *plugin.Client
+	grpcPlugin      pluginModel.RendererPlugin
+	pluginInfo      *plugins.RendererPlugin
+	renderAction    renderFunc
+	domain          string
+	inProgressCount int
 
 	Cfg *setting.Cfg `inject:""`
 }
@@ -90,6 +91,18 @@ func (rs *RenderingService) Run(ctx context.Context) error {
 }
 
 func (rs *RenderingService) Render(ctx context.Context, opts Opts) (*RenderResult, error) {
+	if rs.inProgressCount > opts.ConcurrentLimit {
+		return &RenderResult{
+			FilePath: filepath.Join(setting.HomePath, "public/img/rendering_limit.png"),
+		}, nil
+	}
+
+	defer func() {
+		rs.inProgressCount -= 1
+	}()
+
+	rs.inProgressCount += 1
+
 	if rs.renderAction != nil {
 		return rs.renderAction(ctx, opts)
 	} else {

+ 6 - 0
pkg/services/sqlstore/alert.go

@@ -60,6 +60,10 @@ func deleteAlertByIdInternal(alertId int64, reason string, sess *DBSession) erro
 		return err
 	}
 
+	if _, err := sess.Exec("DELETE FROM alert_notification_state WHERE alert_id = ?", alertId); err != nil {
+		return err
+	}
+
 	return nil
 }
 
@@ -275,6 +279,8 @@ func SetAlertState(cmd *m.SetAlertStateCommand) error {
 		}
 
 		sess.ID(alert.Id).Update(&alert)
+
+		cmd.Result = alert
 		return nil
 	})
 }

+ 113 - 28
pkg/services/sqlstore/alert_notification.go

@@ -3,6 +3,7 @@ package sqlstore
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"time"
@@ -18,16 +19,23 @@ func init() {
 	bus.AddHandler("sql", DeleteAlertNotification)
 	bus.AddHandler("sql", GetAlertNotificationsToSend)
 	bus.AddHandler("sql", GetAllAlertNotifications)
-	bus.AddHandlerCtx("sql", RecordNotificationJournal)
-	bus.AddHandlerCtx("sql", GetLatestNotification)
-	bus.AddHandlerCtx("sql", CleanNotificationJournal)
+	bus.AddHandlerCtx("sql", GetOrCreateAlertNotificationState)
+	bus.AddHandlerCtx("sql", SetAlertNotificationStateToCompleteCommand)
+	bus.AddHandlerCtx("sql", SetAlertNotificationStateToPendingCommand)
 }
 
 func DeleteAlertNotification(cmd *m.DeleteAlertNotificationCommand) error {
 	return inTransaction(func(sess *DBSession) error {
 		sql := "DELETE FROM alert_notification WHERE alert_notification.org_id = ? AND alert_notification.id = ?"
-		_, err := sess.Exec(sql, cmd.OrgId, cmd.Id)
-		return err
+		if _, err := sess.Exec(sql, cmd.OrgId, cmd.Id); err != nil {
+			return err
+		}
+
+		if _, err := sess.Exec("DELETE FROM alert_notification_state WHERE alert_notification_state.org_id = ? AND alert_notification_state.notifier_id = ?", cmd.OrgId, cmd.Id); err != nil {
+			return err
+		}
+
+		return nil
 	})
 }
 
@@ -229,46 +237,123 @@ func UpdateAlertNotification(cmd *m.UpdateAlertNotificationCommand) error {
 	})
 }
 
-func RecordNotificationJournal(ctx context.Context, cmd *m.RecordNotificationJournalCommand) error {
+func SetAlertNotificationStateToCompleteCommand(ctx context.Context, cmd *m.SetAlertNotificationStateToCompleteCommand) error {
 	return inTransactionCtx(ctx, func(sess *DBSession) error {
-		journalEntry := &m.AlertNotificationJournal{
-			OrgId:      cmd.OrgId,
-			AlertId:    cmd.AlertId,
-			NotifierId: cmd.NotifierId,
-			SentAt:     cmd.SentAt,
-			Success:    cmd.Success,
+		version := cmd.Version
+		var current m.AlertNotificationState
+		sess.ID(cmd.Id).Get(&current)
+
+		newVersion := cmd.Version + 1
+
+		sql := `UPDATE alert_notification_state SET
+			state = ?,
+			version = ?,
+			updated_at = ?
+		WHERE
+			id = ?`
+
+		_, err := sess.Exec(sql, m.AlertNotificationStateCompleted, newVersion, timeNow().Unix(), cmd.Id)
+
+		if err != nil {
+			return err
 		}
 
-		_, err := sess.Insert(journalEntry)
-		return err
+		if current.Version != version {
+			sqlog.Error("notification state out of sync. the notification is marked as complete but has been modified between set as pending and completion.", "notifierId", current.NotifierId)
+		}
+
+		return nil
 	})
 }
 
-func GetLatestNotification(ctx context.Context, cmd *m.GetLatestNotificationQuery) error {
-	return inTransactionCtx(ctx, func(sess *DBSession) error {
-		nj := &m.AlertNotificationJournal{}
-
-		_, err := sess.Desc("alert_notification_journal.sent_at").
-			Limit(1).
-			Where("alert_notification_journal.org_id = ? AND alert_notification_journal.alert_id = ? AND alert_notification_journal.notifier_id = ?", cmd.OrgId, cmd.AlertId, cmd.NotifierId).Get(nj)
+func SetAlertNotificationStateToPendingCommand(ctx context.Context, cmd *m.SetAlertNotificationStateToPendingCommand) error {
+	return withDbSession(ctx, func(sess *DBSession) error {
+		newVersion := cmd.Version + 1
+		sql := `UPDATE alert_notification_state SET
+			state = ?,
+			version = ?,
+			updated_at = ?,
+			alert_rule_state_updated_version = ?
+		WHERE
+			id = ? AND
+			(version = ? OR alert_rule_state_updated_version < ?)`
+
+		res, err := sess.Exec(sql,
+			m.AlertNotificationStatePending,
+			newVersion,
+			timeNow().Unix(),
+			cmd.AlertRuleStateUpdatedVersion,
+			cmd.Id,
+			cmd.Version,
+			cmd.AlertRuleStateUpdatedVersion)
 
 		if err != nil {
 			return err
 		}
 
-		if nj.AlertId == 0 && nj.Id == 0 && nj.NotifierId == 0 && nj.OrgId == 0 {
-			return m.ErrJournalingNotFound
+		affected, _ := res.RowsAffected()
+		if affected == 0 {
+			return m.ErrAlertNotificationStateVersionConflict
 		}
 
-		cmd.Result = nj
+		cmd.ResultVersion = newVersion
+
 		return nil
 	})
 }
 
-func CleanNotificationJournal(ctx context.Context, cmd *m.CleanNotificationJournalCommand) error {
+func GetOrCreateAlertNotificationState(ctx context.Context, cmd *m.GetOrCreateNotificationStateQuery) error {
 	return inTransactionCtx(ctx, func(sess *DBSession) error {
-		sql := "DELETE FROM alert_notification_journal WHERE alert_notification_journal.org_id = ? AND alert_notification_journal.alert_id = ? AND alert_notification_journal.notifier_id = ?"
-		_, err := sess.Exec(sql, cmd.OrgId, cmd.AlertId, cmd.NotifierId)
-		return err
+		nj := &m.AlertNotificationState{}
+
+		exist, err := getAlertNotificationState(sess, cmd, nj)
+
+		// if exists, return it, otherwise create it with default values
+		if err != nil {
+			return err
+		}
+
+		if exist {
+			cmd.Result = nj
+			return nil
+		}
+
+		notificationState := &m.AlertNotificationState{
+			OrgId:      cmd.OrgId,
+			AlertId:    cmd.AlertId,
+			NotifierId: cmd.NotifierId,
+			State:      m.AlertNotificationStateUnknown,
+			UpdatedAt:  timeNow().Unix(),
+		}
+
+		if _, err := sess.Insert(notificationState); err != nil {
+			if dialect.IsUniqueConstraintViolation(err) {
+				exist, err = getAlertNotificationState(sess, cmd, nj)
+
+				if err != nil {
+					return err
+				}
+
+				if !exist {
+					return errors.New("Should not happen")
+				}
+
+				cmd.Result = nj
+				return nil
+			}
+
+			return err
+		}
+
+		cmd.Result = notificationState
+		return nil
 	})
 }
+
+func getAlertNotificationState(sess *DBSession, cmd *m.GetOrCreateNotificationStateQuery, nj *m.AlertNotificationState) (bool, error) {
+	return sess.
+		Where("alert_notification_state.org_id = ?", cmd.OrgId).
+		Where("alert_notification_state.alert_id = ?", cmd.AlertId).
+		Where("alert_notification_state.notifier_id = ?", cmd.NotifierId).
+		Get(nj)
+}

+ 129 - 46
pkg/services/sqlstore/alert_notification_test.go

@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/grafana/grafana/pkg/components/simplejson"
-	m "github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/models"
 	. "github.com/smartystreets/goconvey/convey"
 )
 
@@ -14,50 +14,133 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 	Convey("Testing Alert notification sql access", t, func() {
 		InitTestDB(t)
 
-		Convey("Alert notification journal", func() {
-			var alertId int64 = 5
-			var orgId int64 = 5
-			var notifierId int64 = 5
+		Convey("Alert notification state", func() {
+			var alertID int64 = 7
+			var orgID int64 = 5
+			var notifierID int64 = 10
+			oldTimeNow := timeNow
+			now := time.Date(2018, 9, 30, 0, 0, 0, 0, time.UTC)
+			timeNow = func() time.Time { return now }
+
+			Convey("Get no existing state should create a new state", func() {
+				query := &models.GetOrCreateNotificationStateQuery{AlertId: alertID, OrgId: orgID, NotifierId: notifierID}
+				err := GetOrCreateAlertNotificationState(context.Background(), query)
+				So(err, ShouldBeNil)
+				So(query.Result, ShouldNotBeNil)
+				So(query.Result.State, ShouldEqual, "unknown")
+				So(query.Result.Version, ShouldEqual, 0)
+				So(query.Result.UpdatedAt, ShouldEqual, now.Unix())
+
+				Convey("Get existing state should not create a new state", func() {
+					query2 := &models.GetOrCreateNotificationStateQuery{AlertId: alertID, OrgId: orgID, NotifierId: notifierID}
+					err := GetOrCreateAlertNotificationState(context.Background(), query2)
+					So(err, ShouldBeNil)
+					So(query2.Result, ShouldNotBeNil)
+					So(query2.Result.Id, ShouldEqual, query.Result.Id)
+					So(query2.Result.UpdatedAt, ShouldEqual, now.Unix())
+				})
 
-			Convey("Getting last journal should raise error if no one exists", func() {
-				query := &m.GetLatestNotificationQuery{AlertId: alertId, OrgId: orgId, NotifierId: notifierId}
-				err := GetLatestNotification(context.Background(), query)
-				So(err, ShouldEqual, m.ErrJournalingNotFound)
+				Convey("Update existing state to pending with correct version should update database", func() {
+					s := *query.Result
 
-				Convey("shoulbe be able to record two journaling events", func() {
-					createCmd := &m.RecordNotificationJournalCommand{AlertId: alertId, NotifierId: notifierId, OrgId: orgId, Success: true, SentAt: 1}
+					cmd := models.SetAlertNotificationStateToPendingCommand{
+						Id:                           s.Id,
+						Version:                      s.Version,
+						AlertRuleStateUpdatedVersion: s.AlertRuleStateUpdatedVersion,
+					}
 
-					err := RecordNotificationJournal(context.Background(), createCmd)
+					err := SetAlertNotificationStateToPendingCommand(context.Background(), &cmd)
 					So(err, ShouldBeNil)
+					So(cmd.ResultVersion, ShouldEqual, 1)
 
-					createCmd.SentAt += 1000 //increase epoch
-
-					err = RecordNotificationJournal(context.Background(), createCmd)
+					query2 := &models.GetOrCreateNotificationStateQuery{AlertId: alertID, OrgId: orgID, NotifierId: notifierID}
+					err = GetOrCreateAlertNotificationState(context.Background(), query2)
 					So(err, ShouldBeNil)
+					So(query2.Result.Version, ShouldEqual, 1)
+					So(query2.Result.State, ShouldEqual, models.AlertNotificationStatePending)
+					So(query2.Result.UpdatedAt, ShouldEqual, now.Unix())
+
+					Convey("Update existing state to completed should update database", func() {
+						s := *query.Result
+						setStateCmd := models.SetAlertNotificationStateToCompleteCommand{
+							Id:      s.Id,
+							Version: cmd.ResultVersion,
+						}
+						err := SetAlertNotificationStateToCompleteCommand(context.Background(), &setStateCmd)
+						So(err, ShouldBeNil)
+
+						query3 := &models.GetOrCreateNotificationStateQuery{AlertId: alertID, OrgId: orgID, NotifierId: notifierID}
+						err = GetOrCreateAlertNotificationState(context.Background(), query3)
+						So(err, ShouldBeNil)
+						So(query3.Result.Version, ShouldEqual, 2)
+						So(query3.Result.State, ShouldEqual, models.AlertNotificationStateCompleted)
+						So(query3.Result.UpdatedAt, ShouldEqual, now.Unix())
+					})
+
+					Convey("Update existing state to completed should update database. regardless of version", func() {
+						s := *query.Result
+						unknownVersion := int64(1000)
+						cmd := models.SetAlertNotificationStateToCompleteCommand{
+							Id:      s.Id,
+							Version: unknownVersion,
+						}
+						err := SetAlertNotificationStateToCompleteCommand(context.Background(), &cmd)
+						So(err, ShouldBeNil)
 
-					Convey("get last journaling event", func() {
-						err := GetLatestNotification(context.Background(), query)
+						query3 := &models.GetOrCreateNotificationStateQuery{AlertId: alertID, OrgId: orgID, NotifierId: notifierID}
+						err = GetOrCreateAlertNotificationState(context.Background(), query3)
 						So(err, ShouldBeNil)
-						So(query.Result.SentAt, ShouldEqual, 1001)
-
-						Convey("be able to clear all journaling for an notifier", func() {
-							cmd := &m.CleanNotificationJournalCommand{AlertId: alertId, NotifierId: notifierId, OrgId: orgId}
-							err := CleanNotificationJournal(context.Background(), cmd)
-							So(err, ShouldBeNil)
-
-							Convey("querying for last junaling should raise error", func() {
-								query := &m.GetLatestNotificationQuery{AlertId: alertId, OrgId: orgId, NotifierId: notifierId}
-								err := GetLatestNotification(context.Background(), query)
-								So(err, ShouldEqual, m.ErrJournalingNotFound)
-							})
-						})
+						So(query3.Result.Version, ShouldEqual, unknownVersion+1)
+						So(query3.Result.State, ShouldEqual, models.AlertNotificationStateCompleted)
+						So(query3.Result.UpdatedAt, ShouldEqual, now.Unix())
 					})
 				})
+
+				Convey("Update existing state to pending with incorrect version should return version mismatch error", func() {
+					s := *query.Result
+					s.Version = 1000
+					cmd := models.SetAlertNotificationStateToPendingCommand{
+						Id:                           s.NotifierId,
+						Version:                      s.Version,
+						AlertRuleStateUpdatedVersion: s.AlertRuleStateUpdatedVersion,
+					}
+					err := SetAlertNotificationStateToPendingCommand(context.Background(), &cmd)
+					So(err, ShouldEqual, models.ErrAlertNotificationStateVersionConflict)
+				})
+
+				Convey("Updating existing state to pending with incorrect version since alert rule state update version is higher", func() {
+					s := *query.Result
+					cmd := models.SetAlertNotificationStateToPendingCommand{
+						Id:                           s.Id,
+						Version:                      s.Version,
+						AlertRuleStateUpdatedVersion: 1000,
+					}
+					err := SetAlertNotificationStateToPendingCommand(context.Background(), &cmd)
+					So(err, ShouldBeNil)
+
+					So(cmd.ResultVersion, ShouldEqual, 1)
+				})
+
+				Convey("different version and same alert state change version should return error", func() {
+					s := *query.Result
+					s.Version = 1000
+					cmd := models.SetAlertNotificationStateToPendingCommand{
+						Id:                           s.Id,
+						Version:                      s.Version,
+						AlertRuleStateUpdatedVersion: s.AlertRuleStateUpdatedVersion,
+					}
+					err := SetAlertNotificationStateToPendingCommand(context.Background(), &cmd)
+					So(err, ShouldNotBeNil)
+				})
+			})
+
+			Reset(func() {
+				timeNow = oldTimeNow
 			})
 		})
 
 		Convey("Alert notifications should be empty", func() {
-			cmd := &m.GetAlertNotificationsQuery{
+			cmd := &models.GetAlertNotificationsQuery{
 				OrgId: 2,
 				Name:  "email",
 			}
@@ -68,7 +151,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 		})
 
 		Convey("Cannot save alert notifier with send reminder = true", func() {
-			cmd := &m.CreateAlertNotificationCommand{
+			cmd := &models.CreateAlertNotificationCommand{
 				Name:         "ops",
 				Type:         "email",
 				OrgId:        1,
@@ -78,7 +161,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 
 			Convey("and missing frequency", func() {
 				err := CreateAlertNotificationCommand(cmd)
-				So(err, ShouldEqual, m.ErrNotificationFrequencyNotFound)
+				So(err, ShouldEqual, models.ErrNotificationFrequencyNotFound)
 			})
 
 			Convey("invalid frequency", func() {
@@ -90,7 +173,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 		})
 
 		Convey("Cannot update alert notifier with send reminder = false", func() {
-			cmd := &m.CreateAlertNotificationCommand{
+			cmd := &models.CreateAlertNotificationCommand{
 				Name:         "ops update",
 				Type:         "email",
 				OrgId:        1,
@@ -101,14 +184,14 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			err := CreateAlertNotificationCommand(cmd)
 			So(err, ShouldBeNil)
 
-			updateCmd := &m.UpdateAlertNotificationCommand{
+			updateCmd := &models.UpdateAlertNotificationCommand{
 				Id:           cmd.Result.Id,
 				SendReminder: true,
 			}
 
 			Convey("and missing frequency", func() {
 				err := UpdateAlertNotification(updateCmd)
-				So(err, ShouldEqual, m.ErrNotificationFrequencyNotFound)
+				So(err, ShouldEqual, models.ErrNotificationFrequencyNotFound)
 			})
 
 			Convey("invalid frequency", func() {
@@ -121,7 +204,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 		})
 
 		Convey("Can save Alert Notification", func() {
-			cmd := &m.CreateAlertNotificationCommand{
+			cmd := &models.CreateAlertNotificationCommand{
 				Name:         "ops",
 				Type:         "email",
 				OrgId:        1,
@@ -143,7 +226,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			})
 
 			Convey("Can update alert notification", func() {
-				newCmd := &m.UpdateAlertNotificationCommand{
+				newCmd := &models.UpdateAlertNotificationCommand{
 					Name:         "NewName",
 					Type:         "webhook",
 					OrgId:        cmd.Result.OrgId,
@@ -159,7 +242,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			})
 
 			Convey("Can update alert notification to disable sending of reminders", func() {
-				newCmd := &m.UpdateAlertNotificationCommand{
+				newCmd := &models.UpdateAlertNotificationCommand{
 					Name:         "NewName",
 					Type:         "webhook",
 					OrgId:        cmd.Result.OrgId,
@@ -174,12 +257,12 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 		})
 
 		Convey("Can search using an array of ids", func() {
-			cmd1 := m.CreateAlertNotificationCommand{Name: "nagios", Type: "webhook", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
-			cmd2 := m.CreateAlertNotificationCommand{Name: "slack", Type: "webhook", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
-			cmd3 := m.CreateAlertNotificationCommand{Name: "ops2", Type: "email", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
-			cmd4 := m.CreateAlertNotificationCommand{IsDefault: true, Name: "default", Type: "email", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
+			cmd1 := models.CreateAlertNotificationCommand{Name: "nagios", Type: "webhook", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
+			cmd2 := models.CreateAlertNotificationCommand{Name: "slack", Type: "webhook", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
+			cmd3 := models.CreateAlertNotificationCommand{Name: "ops2", Type: "email", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
+			cmd4 := models.CreateAlertNotificationCommand{IsDefault: true, Name: "default", Type: "email", OrgId: 1, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
 
-			otherOrg := m.CreateAlertNotificationCommand{Name: "default", Type: "email", OrgId: 2, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
+			otherOrg := models.CreateAlertNotificationCommand{Name: "default", Type: "email", OrgId: 2, SendReminder: true, Frequency: "10s", Settings: simplejson.New()}
 
 			So(CreateAlertNotificationCommand(&cmd1), ShouldBeNil)
 			So(CreateAlertNotificationCommand(&cmd2), ShouldBeNil)
@@ -188,7 +271,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			So(CreateAlertNotificationCommand(&otherOrg), ShouldBeNil)
 
 			Convey("search", func() {
-				query := &m.GetAlertNotificationsToSendQuery{
+				query := &models.GetAlertNotificationsToSendQuery{
 					Ids:   []int64{cmd1.Result.Id, cmd2.Result.Id, 112341231},
 					OrgId: 1,
 				}
@@ -199,7 +282,7 @@ func TestAlertNotificationSQLAccess(t *testing.T) {
 			})
 
 			Convey("all", func() {
-				query := &m.GetAllAlertNotificationsQuery{
+				query := &models.GetAllAlertNotificationsQuery{
 					OrgId: 1,
 				}
 

+ 0 - 23
pkg/services/sqlstore/dashboard_service_integration_test.go

@@ -932,29 +932,6 @@ func TestIntegratedDashboardService(t *testing.T) {
 	})
 }
 
-type scenarioContext struct {
-	dashboardGuardianMock *guardian.FakeDashboardGuardian
-}
-
-type scenarioFunc func(c *scenarioContext)
-
-func dashboardGuardianScenario(desc string, mock *guardian.FakeDashboardGuardian, fn scenarioFunc) {
-	Convey(desc, func() {
-		origNewDashboardGuardian := guardian.New
-		guardian.MockDashboardGuardian(mock)
-
-		sc := &scenarioContext{
-			dashboardGuardianMock: mock,
-		}
-
-		defer func() {
-			guardian.New = origNewDashboardGuardian
-		}()
-
-		fn(sc)
-	})
-}
-
 type dashboardPermissionScenarioContext struct {
 	dashboardGuardianMock *guardian.FakeDashboardGuardian
 }

+ 23 - 0
pkg/services/sqlstore/migrations/alert_mig.go

@@ -107,4 +107,27 @@ func addAlertMigrations(mg *Migrator) {
 
 	mg.AddMigration("create notification_journal table v1", NewAddTableMigration(notification_journal))
 	mg.AddMigration("add index notification_journal org_id & alert_id & notifier_id", NewAddIndexMigration(notification_journal, notification_journal.Indices[0]))
+
+	mg.AddMigration("drop alert_notification_journal", NewDropTableMigration("alert_notification_journal"))
+
+	alert_notification_state := Table{
+		Name: "alert_notification_state",
+		Columns: []*Column{
+			{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
+			{Name: "org_id", Type: DB_BigInt, Nullable: false},
+			{Name: "alert_id", Type: DB_BigInt, Nullable: false},
+			{Name: "notifier_id", Type: DB_BigInt, Nullable: false},
+			{Name: "state", Type: DB_NVarchar, Length: 50, Nullable: false},
+			{Name: "version", Type: DB_BigInt, Nullable: false},
+			{Name: "updated_at", Type: DB_BigInt, Nullable: false},
+			{Name: "alert_rule_state_updated_version", Type: DB_BigInt, Nullable: false},
+		},
+		Indices: []*Index{
+			{Cols: []string{"org_id", "alert_id", "notifier_id"}, Type: UniqueIndex},
+		},
+	}
+
+	mg.AddMigration("create alert_notification_state table v1", NewAddTableMigration(alert_notification_state))
+	mg.AddMigration("add index alert_notification_state org_id & alert_id & notifier_id",
+		NewAddIndexMigration(alert_notification_state, alert_notification_state.Indices[0]))
 }

+ 2 - 0
pkg/services/sqlstore/migrator/dialect.go

@@ -44,6 +44,8 @@ type Dialect interface {
 
 	CleanDB() error
 	NoOpSql() string
+
+	IsUniqueConstraintViolation(err error) bool
 }
 
 func NewDialect(engine *xorm.Engine) Dialect {

+ 12 - 0
pkg/services/sqlstore/migrator/mysql_dialect.go

@@ -5,6 +5,8 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/VividCortex/mysqlerr"
+	"github.com/go-sql-driver/mysql"
 	"github.com/go-xorm/xorm"
 )
 
@@ -125,3 +127,13 @@ func (db *Mysql) CleanDB() error {
 
 	return nil
 }
+
+func (db *Mysql) IsUniqueConstraintViolation(err error) bool {
+	if driverErr, ok := err.(*mysql.MySQLError); ok {
+		if driverErr.Number == mysqlerr.ER_DUP_ENTRY {
+			return true
+		}
+	}
+
+	return false
+}

+ 11 - 0
pkg/services/sqlstore/migrator/postgres_dialect.go

@@ -6,6 +6,7 @@ import (
 	"strings"
 
 	"github.com/go-xorm/xorm"
+	"github.com/lib/pq"
 )
 
 type Postgres struct {
@@ -136,3 +137,13 @@ func (db *Postgres) CleanDB() error {
 
 	return nil
 }
+
+func (db *Postgres) IsUniqueConstraintViolation(err error) bool {
+	if driverErr, ok := err.(*pq.Error); ok {
+		if driverErr.Code == "23505" {
+			return true
+		}
+	}
+
+	return false
+}

+ 11 - 0
pkg/services/sqlstore/migrator/sqlite_dialect.go

@@ -4,6 +4,7 @@ import (
 	"fmt"
 
 	"github.com/go-xorm/xorm"
+	sqlite3 "github.com/mattn/go-sqlite3"
 )
 
 type Sqlite3 struct {
@@ -82,3 +83,13 @@ func (db *Sqlite3) DropIndexSql(tableName string, index *Index) string {
 func (db *Sqlite3) CleanDB() error {
 	return nil
 }
+
+func (db *Sqlite3) IsUniqueConstraintViolation(err error) bool {
+	if driverErr, ok := err.(sqlite3.Error); ok {
+		if driverErr.ExtendedCode == sqlite3.ErrConstraintUnique {
+			return true
+		}
+	}
+
+	return false
+}

+ 0 - 4
pkg/services/sqlstore/transactions_test.go

@@ -10,10 +10,6 @@ import (
 	. "github.com/smartystreets/goconvey/convey"
 )
 
-type testQuery struct {
-	result bool
-}
-
 var ProvokedError = errors.New("testing error.")
 
 func TestTransaction(t *testing.T) {

+ 9 - 4
pkg/setting/setting.go

@@ -166,6 +166,7 @@ var (
 	// Alerting
 	AlertingEnabled            bool
 	ExecuteAlerts              bool
+	AlertingRenderLimit        int
 	AlertingErrorOrTimeout     string
 	AlertingNoDataOrNullValues string
 
@@ -196,10 +197,13 @@ type Cfg struct {
 	Smtp SmtpSettings
 
 	// Rendering
-	ImagesDir                        string
-	PhantomDir                       string
-	RendererUrl                      string
-	RendererCallbackUrl              string
+	ImagesDir             string
+	PhantomDir            string
+	RendererUrl           string
+	RendererCallbackUrl   string
+	RendererLimit         int
+	RendererLimitAlerting int
+
 	DisableBruteForceLoginProtection bool
 
 	TempDataLifetime time.Duration
@@ -677,6 +681,7 @@ func (cfg *Cfg) Load(args *CommandLineArgs) error {
 	alerting := iniFile.Section("alerting")
 	AlertingEnabled = alerting.Key("enabled").MustBool(true)
 	ExecuteAlerts = alerting.Key("execute_alerts").MustBool(true)
+	AlertingRenderLimit = alerting.Key("concurrent_render_limit").MustInt(5)
 	AlertingErrorOrTimeout = alerting.Key("error_or_timeout").MustString("alerting")
 	AlertingNoDataOrNullValues = alerting.Key("nodata_or_nullvalues").MustString("no_data")
 

+ 9 - 5
pkg/social/social.go

@@ -46,10 +46,14 @@ func (e *Error) Error() string {
 	return e.s
 }
 
+const (
+	grafanaCom = "grafana_com"
+)
+
 var (
 	SocialBaseUrl = "/login/"
 	SocialMap     = make(map[string]SocialConnector)
-	allOauthes    = []string{"github", "gitlab", "google", "generic_oauth", "grafananet", "grafana_com"}
+	allOauthes    = []string{"github", "gitlab", "google", "generic_oauth", "grafananet", grafanaCom}
 )
 
 func NewOAuthService() {
@@ -82,7 +86,7 @@ func NewOAuthService() {
 		}
 
 		if name == "grafananet" {
-			name = "grafana_com"
+			name = grafanaCom
 		}
 
 		setting.OAuthService.OAuthInfos[name] = info
@@ -159,7 +163,7 @@ func NewOAuthService() {
 			}
 		}
 
-		if name == "grafana_com" {
+		if name == grafanaCom {
 			config = oauth2.Config{
 				ClientID:     info.ClientId,
 				ClientSecret: info.ClientSecret,
@@ -171,7 +175,7 @@ func NewOAuthService() {
 				Scopes:      info.Scopes,
 			}
 
-			SocialMap["grafana_com"] = &SocialGrafanaCom{
+			SocialMap[grafanaCom] = &SocialGrafanaCom{
 				SocialBase: &SocialBase{
 					Config: &config,
 					log:    logger,
@@ -194,7 +198,7 @@ var GetOAuthProviders = func(cfg *setting.Cfg) map[string]bool {
 
 	for _, name := range allOauthes {
 		if name == "grafananet" {
-			name = "grafana_com"
+			name = grafanaCom
 		}
 
 		sec := cfg.Raw.Section("auth." + name)

+ 1 - 2
pkg/tsdb/cloudwatch/credentials.go

@@ -42,8 +42,7 @@ func GetCredentials(dsInfo *DatasourceInfo) (*credentials.Credentials, error) {
 	accessKeyId := ""
 	secretAccessKey := ""
 	sessionToken := ""
-	var expiration *time.Time
-	expiration = nil
+	var expiration *time.Time = nil
 	if dsInfo.AuthType == "arn" && strings.Index(dsInfo.AssumeRoleArn, "arn:aws:iam:") == 0 {
 		params := &sts.AssumeRoleInput{
 			RoleArn:         aws.String(dsInfo.AssumeRoleArn),

+ 1 - 1
pkg/tsdb/cloudwatch/metric_find_query.go

@@ -235,7 +235,7 @@ func parseMultiSelectValue(input string) []string {
 func (e *CloudWatchExecutor) handleGetRegions(ctx context.Context, parameters *simplejson.Json, queryContext *tsdb.TsdbQuery) ([]suggestData, error) {
 	regions := []string{
 		"ap-northeast-1", "ap-northeast-2", "ap-southeast-1", "ap-southeast-2", "ap-south-1", "ca-central-1", "cn-north-1", "cn-northwest-1",
-		"eu-central-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", "us-gov-west-1", "us-west-1", "us-west-2",
+		"eu-central-1", "eu-west-1", "eu-west-2", "eu-west-3", "sa-east-1", "us-east-1", "us-east-2", "us-gov-west-1", "us-west-1", "us-west-2", "us-isob-east-1", "us-iso-east-1",
 	}
 
 	result := make([]suggestData, 0)

+ 21 - 8
pkg/tsdb/elasticsearch/response_parser.go

@@ -13,6 +13,19 @@ import (
 	"github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
 )
 
+const (
+	// Metric types
+	countType         = "count"
+	percentilesType   = "percentiles"
+	extendedStatsType = "extended_stats"
+	// Bucket types
+	dateHistType    = "date_histogram"
+	histogramType   = "histogram"
+	filtersType     = "filters"
+	termsType       = "terms"
+	geohashGridType = "geohash_grid"
+)
+
 type responseParser struct {
 	Responses []*es.SearchResponse
 	Targets   []*Query
@@ -81,7 +94,7 @@ func (rp *responseParser) processBuckets(aggs map[string]interface{}, target *Qu
 		}
 
 		if depth == maxDepth {
-			if aggDef.Type == "date_histogram" {
+			if aggDef.Type == dateHistType {
 				err = rp.processMetrics(esAgg, target, series, props)
 			} else {
 				err = rp.processAggregationDocs(esAgg, aggDef, target, table, props)
@@ -149,7 +162,7 @@ func (rp *responseParser) processMetrics(esAgg *simplejson.Json, target *Query,
 		}
 
 		switch metric.Type {
-		case "count":
+		case countType:
 			newSeries := tsdb.TimeSeries{
 				Tags: make(map[string]string),
 			}
@@ -164,10 +177,10 @@ func (rp *responseParser) processMetrics(esAgg *simplejson.Json, target *Query,
 			for k, v := range props {
 				newSeries.Tags[k] = v
 			}
-			newSeries.Tags["metric"] = "count"
+			newSeries.Tags["metric"] = countType
 			*series = append(*series, &newSeries)
 
-		case "percentiles":
+		case percentilesType:
 			buckets := esAgg.Get("buckets").MustArray()
 			if len(buckets) == 0 {
 				break
@@ -198,7 +211,7 @@ func (rp *responseParser) processMetrics(esAgg *simplejson.Json, target *Query,
 				}
 				*series = append(*series, &newSeries)
 			}
-		case "extended_stats":
+		case extendedStatsType:
 			buckets := esAgg.Get("buckets").MustArray()
 
 			metaKeys := make([]string, 0)
@@ -312,9 +325,9 @@ func (rp *responseParser) processAggregationDocs(esAgg *simplejson.Json, aggDef
 
 		for _, metric := range target.Metrics {
 			switch metric.Type {
-			case "count":
+			case countType:
 				addMetricValue(&values, rp.getMetricName(metric.Type), castToNullFloat(bucket.Get("doc_count")))
-			case "extended_stats":
+			case extendedStatsType:
 				metaKeys := make([]string, 0)
 				meta := metric.Meta.MustMap()
 				for k := range meta {
@@ -366,7 +379,7 @@ func (rp *responseParser) processAggregationDocs(esAgg *simplejson.Json, aggDef
 func (rp *responseParser) trimDatapoints(series *tsdb.TimeSeriesSlice, target *Query) {
 	var histogram *BucketAgg
 	for _, bucketAgg := range target.BucketAggs {
-		if bucketAgg.Type == "date_histogram" {
+		if bucketAgg.Type == dateHistType {
 			histogram = bucketAgg
 			break
 		}

+ 5 - 5
pkg/tsdb/elasticsearch/time_series_query.go

@@ -75,15 +75,15 @@ func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
 		// iterate backwards to create aggregations bottom-down
 		for _, bucketAgg := range q.BucketAggs {
 			switch bucketAgg.Type {
-			case "date_histogram":
+			case dateHistType:
 				aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
-			case "histogram":
+			case histogramType:
 				aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
-			case "filters":
+			case filtersType:
 				aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
-			case "terms":
+			case termsType:
 				aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
-			case "geohash_grid":
+			case geohashGridType:
 				aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
 			}
 		}

+ 120 - 0
pkg/tsdb/stackdriver/annotation_query.go

@@ -0,0 +1,120 @@
+package stackdriver
+
+import (
+	"context"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/grafana/grafana/pkg/tsdb"
+)
+
+func (e *StackdriverExecutor) executeAnnotationQuery(ctx context.Context, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
+	result := &tsdb.Response{
+		Results: make(map[string]*tsdb.QueryResult),
+	}
+
+	firstQuery := tsdbQuery.Queries[0]
+
+	queries, err := e.buildQueries(tsdbQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	queryRes, resp, err := e.executeQuery(ctx, queries[0], tsdbQuery)
+	if err != nil {
+		return nil, err
+	}
+	title := firstQuery.Model.Get("title").MustString()
+	text := firstQuery.Model.Get("text").MustString()
+	tags := firstQuery.Model.Get("tags").MustString()
+	err = e.parseToAnnotations(queryRes, resp, queries[0], title, text, tags)
+	result.Results[firstQuery.RefId] = queryRes
+
+	return result, err
+}
+
+func (e *StackdriverExecutor) parseToAnnotations(queryRes *tsdb.QueryResult, data StackdriverResponse, query *StackdriverQuery, title string, text string, tags string) error {
+	annotations := make([]map[string]string, 0)
+
+	for _, series := range data.TimeSeries {
+		// reverse the order to be ascending
+		for i := len(series.Points) - 1; i >= 0; i-- {
+			point := series.Points[i]
+			value := strconv.FormatFloat(point.Value.DoubleValue, 'f', 6, 64)
+			if series.ValueType == "STRING" {
+				value = point.Value.StringValue
+			}
+			annotation := make(map[string]string)
+			annotation["time"] = point.Interval.EndTime.UTC().Format(time.RFC3339)
+			annotation["title"] = formatAnnotationText(title, value, series.Metric.Type, series.Metric.Labels, series.Resource.Labels)
+			annotation["tags"] = tags
+			annotation["text"] = formatAnnotationText(text, value, series.Metric.Type, series.Metric.Labels, series.Resource.Labels)
+			annotations = append(annotations, annotation)
+		}
+	}
+
+	transformAnnotationToTable(annotations, queryRes)
+	return nil
+}
+
+func transformAnnotationToTable(data []map[string]string, result *tsdb.QueryResult) {
+	table := &tsdb.Table{
+		Columns: make([]tsdb.TableColumn, 4),
+		Rows:    make([]tsdb.RowValues, 0),
+	}
+	table.Columns[0].Text = "time"
+	table.Columns[1].Text = "title"
+	table.Columns[2].Text = "tags"
+	table.Columns[3].Text = "text"
+
+	for _, r := range data {
+		values := make([]interface{}, 4)
+		values[0] = r["time"]
+		values[1] = r["title"]
+		values[2] = r["tags"]
+		values[3] = r["text"]
+		table.Rows = append(table.Rows, values)
+	}
+	result.Tables = append(result.Tables, table)
+	result.Meta.Set("rowCount", len(data))
+	slog.Info("anno", "len", len(data))
+}
+
+func formatAnnotationText(annotationText string, pointValue string, metricType string, metricLabels map[string]string, resourceLabels map[string]string) string {
+	result := legendKeyFormat.ReplaceAllFunc([]byte(annotationText), func(in []byte) []byte {
+		metaPartName := strings.Replace(string(in), "{{", "", 1)
+		metaPartName = strings.Replace(metaPartName, "}}", "", 1)
+		metaPartName = strings.TrimSpace(metaPartName)
+
+		if metaPartName == "metric.type" {
+			return []byte(metricType)
+		}
+
+		metricPart := replaceWithMetricPart(metaPartName, metricType)
+
+		if metricPart != nil {
+			return metricPart
+		}
+
+		if metaPartName == "metric.value" {
+			return []byte(pointValue)
+		}
+
+		metaPartName = strings.Replace(metaPartName, "metric.label.", "", 1)
+
+		if val, exists := metricLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
+		metaPartName = strings.Replace(metaPartName, "resource.label.", "", 1)
+
+		if val, exists := resourceLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
+		return in
+	})
+
+	return string(result)
+}

+ 33 - 0
pkg/tsdb/stackdriver/annotation_query_test.go

@@ -0,0 +1,33 @@
+package stackdriver
+
+import (
+	"testing"
+
+	"github.com/grafana/grafana/pkg/components/simplejson"
+	"github.com/grafana/grafana/pkg/tsdb"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestStackdriverAnnotationQuery(t *testing.T) {
+	Convey("Stackdriver Annotation Query Executor", t, func() {
+		executor := &StackdriverExecutor{}
+		Convey("When parsing the stackdriver api response", func() {
+			data, err := loadTestFile("./test-data/2-series-response-no-agg.json")
+			So(err, ShouldBeNil)
+			So(len(data.TimeSeries), ShouldEqual, 3)
+
+			res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "annotationQuery"}
+			query := &StackdriverQuery{}
+			err = executor.parseToAnnotations(res, data, query, "atitle {{metric.label.instance_name}} {{metric.value}}", "atext {{resource.label.zone}}", "atag")
+			So(err, ShouldBeNil)
+
+			Convey("Should return annotations table", func() {
+				So(len(res.Tables), ShouldEqual, 1)
+				So(len(res.Tables[0].Rows), ShouldEqual, 9)
+				So(res.Tables[0].Rows[0][1], ShouldEqual, "atitle collector-asia-east-1 9.856650")
+				So(res.Tables[0].Rows[0][3], ShouldEqual, "atext asia-east1-a")
+			})
+		})
+	})
+}

+ 460 - 0
pkg/tsdb/stackdriver/stackdriver.go

@@ -0,0 +1,460 @@
+package stackdriver
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"net/url"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context/ctxhttp"
+
+	"github.com/grafana/grafana/pkg/api/pluginproxy"
+	"github.com/grafana/grafana/pkg/components/null"
+	"github.com/grafana/grafana/pkg/components/simplejson"
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/plugins"
+	"github.com/grafana/grafana/pkg/setting"
+	"github.com/grafana/grafana/pkg/tsdb"
+	"github.com/opentracing/opentracing-go"
+)
+
+var (
+	slog             log.Logger
+	legendKeyFormat  *regexp.Regexp
+	metricNameFormat *regexp.Regexp
+)
+
+// StackdriverExecutor executes queries for the Stackdriver datasource
+type StackdriverExecutor struct {
+	httpClient *http.Client
+	dsInfo     *models.DataSource
+}
+
+// NewStackdriverExecutor initializes a http client
+func NewStackdriverExecutor(dsInfo *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
+	httpClient, err := dsInfo.GetHttpClient()
+	if err != nil {
+		return nil, err
+	}
+
+	return &StackdriverExecutor{
+		httpClient: httpClient,
+		dsInfo:     dsInfo,
+	}, nil
+}
+
+func init() {
+	slog = log.New("tsdb.stackdriver")
+	tsdb.RegisterTsdbQueryEndpoint("stackdriver", NewStackdriverExecutor)
+	legendKeyFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
+	metricNameFormat = regexp.MustCompile(`([\w\d_]+)\.googleapis\.com/(.+)`)
+}
+
+// Query takes in the frontend queries, parses them into the Stackdriver query format
+// executes the queries against the Stackdriver API and parses the response into
+// the time series or table format
+func (e *StackdriverExecutor) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
+	var result *tsdb.Response
+	var err error
+	queryType := tsdbQuery.Queries[0].Model.Get("type").MustString("")
+
+	switch queryType {
+	case "annotationQuery":
+		result, err = e.executeAnnotationQuery(ctx, tsdbQuery)
+	case "timeSeriesQuery":
+		fallthrough
+	default:
+		result, err = e.executeTimeSeriesQuery(ctx, tsdbQuery)
+	}
+
+	return result, err
+}
+
+func (e *StackdriverExecutor) executeTimeSeriesQuery(ctx context.Context, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
+	result := &tsdb.Response{
+		Results: make(map[string]*tsdb.QueryResult),
+	}
+
+	queries, err := e.buildQueries(tsdbQuery)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, query := range queries {
+		queryRes, resp, err := e.executeQuery(ctx, query, tsdbQuery)
+		if err != nil {
+			return nil, err
+		}
+		err = e.parseResponse(queryRes, resp, query)
+		if err != nil {
+			queryRes.Error = err
+		}
+		result.Results[query.RefID] = queryRes
+	}
+
+	return result, nil
+}
+
+func (e *StackdriverExecutor) buildQueries(tsdbQuery *tsdb.TsdbQuery) ([]*StackdriverQuery, error) {
+	stackdriverQueries := []*StackdriverQuery{}
+
+	startTime, err := tsdbQuery.TimeRange.ParseFrom()
+	if err != nil {
+		return nil, err
+	}
+
+	endTime, err := tsdbQuery.TimeRange.ParseTo()
+	if err != nil {
+		return nil, err
+	}
+
+	durationSeconds := int(endTime.Sub(startTime).Seconds())
+
+	for _, query := range tsdbQuery.Queries {
+		var target string
+
+		metricType := query.Model.Get("metricType").MustString()
+		filterParts := query.Model.Get("filters").MustArray()
+
+		params := url.Values{}
+		params.Add("interval.startTime", startTime.UTC().Format(time.RFC3339))
+		params.Add("interval.endTime", endTime.UTC().Format(time.RFC3339))
+		params.Add("filter", buildFilterString(metricType, filterParts))
+		params.Add("view", query.Model.Get("view").MustString("FULL"))
+		setAggParams(&params, query, durationSeconds)
+
+		target = params.Encode()
+
+		if setting.Env == setting.DEV {
+			slog.Debug("Stackdriver request", "params", params)
+		}
+
+		groupBys := query.Model.Get("groupBys").MustArray()
+		groupBysAsStrings := make([]string, 0)
+		for _, groupBy := range groupBys {
+			groupBysAsStrings = append(groupBysAsStrings, groupBy.(string))
+		}
+
+		aliasBy := query.Model.Get("aliasBy").MustString()
+
+		stackdriverQueries = append(stackdriverQueries, &StackdriverQuery{
+			Target:   target,
+			Params:   params,
+			RefID:    query.RefId,
+			GroupBys: groupBysAsStrings,
+			AliasBy:  aliasBy,
+		})
+	}
+
+	return stackdriverQueries, nil
+}
+
+func buildFilterString(metricType string, filterParts []interface{}) string {
+	filterString := ""
+	for i, part := range filterParts {
+		mod := i % 4
+		if part == "AND" {
+			filterString += " "
+		} else if mod == 2 {
+			filterString += fmt.Sprintf(`"%s"`, part)
+		} else {
+			filterString += part.(string)
+		}
+	}
+	return strings.Trim(fmt.Sprintf(`metric.type="%s" %s`, metricType, filterString), " ")
+}
+
+func setAggParams(params *url.Values, query *tsdb.Query, durationSeconds int) {
+	primaryAggregation := query.Model.Get("primaryAggregation").MustString()
+	perSeriesAligner := query.Model.Get("perSeriesAligner").MustString()
+	alignmentPeriod := query.Model.Get("alignmentPeriod").MustString()
+
+	if primaryAggregation == "" {
+		primaryAggregation = "REDUCE_NONE"
+	}
+
+	if perSeriesAligner == "" {
+		perSeriesAligner = "ALIGN_MEAN"
+	}
+
+	if alignmentPeriod == "grafana-auto" || alignmentPeriod == "" {
+		alignmentPeriodValue := int(math.Max(float64(query.IntervalMs)/1000, 60.0))
+		alignmentPeriod = "+" + strconv.Itoa(alignmentPeriodValue) + "s"
+	}
+
+	if alignmentPeriod == "stackdriver-auto" {
+		alignmentPeriodValue := int(math.Max(float64(durationSeconds), 60.0))
+		if alignmentPeriodValue < 60*60*23 {
+			alignmentPeriod = "+60s"
+		} else if alignmentPeriodValue < 60*60*24*6 {
+			alignmentPeriod = "+300s"
+		} else {
+			alignmentPeriod = "+3600s"
+		}
+	}
+
+	re := regexp.MustCompile("[0-9]+")
+	seconds, err := strconv.ParseInt(re.FindString(alignmentPeriod), 10, 64)
+	if err != nil || seconds > 3600 {
+		alignmentPeriod = "+3600s"
+	}
+
+	params.Add("aggregation.crossSeriesReducer", primaryAggregation)
+	params.Add("aggregation.perSeriesAligner", perSeriesAligner)
+	params.Add("aggregation.alignmentPeriod", alignmentPeriod)
+
+	groupBys := query.Model.Get("groupBys").MustArray()
+	if len(groupBys) > 0 {
+		for i := 0; i < len(groupBys); i++ {
+			params.Add("aggregation.groupByFields", groupBys[i].(string))
+		}
+	}
+}
+
+func (e *StackdriverExecutor) executeQuery(ctx context.Context, query *StackdriverQuery, tsdbQuery *tsdb.TsdbQuery) (*tsdb.QueryResult, StackdriverResponse, error) {
+	queryResult := &tsdb.QueryResult{Meta: simplejson.New(), RefId: query.RefID}
+
+	req, err := e.createRequest(ctx, e.dsInfo)
+	if err != nil {
+		queryResult.Error = err
+		return queryResult, StackdriverResponse{}, nil
+	}
+
+	req.URL.RawQuery = query.Params.Encode()
+	queryResult.Meta.Set("rawQuery", req.URL.RawQuery)
+	alignmentPeriod, ok := req.URL.Query()["aggregation.alignmentPeriod"]
+
+	if ok {
+		re := regexp.MustCompile("[0-9]+")
+		seconds, err := strconv.ParseInt(re.FindString(alignmentPeriod[0]), 10, 64)
+		if err == nil {
+			queryResult.Meta.Set("alignmentPeriod", seconds)
+		}
+	}
+
+	span, ctx := opentracing.StartSpanFromContext(ctx, "stackdriver query")
+	span.SetTag("target", query.Target)
+	span.SetTag("from", tsdbQuery.TimeRange.From)
+	span.SetTag("until", tsdbQuery.TimeRange.To)
+	span.SetTag("datasource_id", e.dsInfo.Id)
+	span.SetTag("org_id", e.dsInfo.OrgId)
+
+	defer span.Finish()
+
+	opentracing.GlobalTracer().Inject(
+		span.Context(),
+		opentracing.HTTPHeaders,
+		opentracing.HTTPHeadersCarrier(req.Header))
+
+	res, err := ctxhttp.Do(ctx, e.httpClient, req)
+	if err != nil {
+		queryResult.Error = err
+		return queryResult, StackdriverResponse{}, nil
+	}
+
+	data, err := e.unmarshalResponse(res)
+	if err != nil {
+		queryResult.Error = err
+		return queryResult, StackdriverResponse{}, nil
+	}
+
+	return queryResult, data, nil
+}
+
+func (e *StackdriverExecutor) unmarshalResponse(res *http.Response) (StackdriverResponse, error) {
+	body, err := ioutil.ReadAll(res.Body)
+	defer res.Body.Close()
+	if err != nil {
+		return StackdriverResponse{}, err
+	}
+
+	if res.StatusCode/100 != 2 {
+		slog.Error("Request failed", "status", res.Status, "body", string(body))
+		return StackdriverResponse{}, fmt.Errorf(string(body))
+	}
+
+	var data StackdriverResponse
+	err = json.Unmarshal(body, &data)
+	if err != nil {
+		slog.Error("Failed to unmarshal Stackdriver response", "error", err, "status", res.Status, "body", string(body))
+		return StackdriverResponse{}, err
+	}
+
+	return data, nil
+}
+
+func (e *StackdriverExecutor) parseResponse(queryRes *tsdb.QueryResult, data StackdriverResponse, query *StackdriverQuery) error {
+	metricLabels := make(map[string][]string)
+	resourceLabels := make(map[string][]string)
+
+	for _, series := range data.TimeSeries {
+		points := make([]tsdb.TimePoint, 0)
+
+		// reverse the order to be ascending
+		for i := len(series.Points) - 1; i >= 0; i-- {
+			point := series.Points[i]
+			value := point.Value.DoubleValue
+
+			if series.ValueType == "INT64" {
+				parsedValue, err := strconv.ParseFloat(point.Value.IntValue, 64)
+				if err == nil {
+					value = parsedValue
+				}
+			}
+
+			if series.ValueType == "BOOL" {
+				if point.Value.BoolValue {
+					value = 1
+				} else {
+					value = 0
+				}
+			}
+
+			points = append(points, tsdb.NewTimePoint(null.FloatFrom(value), float64((point.Interval.EndTime).Unix())*1000))
+		}
+
+		defaultMetricName := series.Metric.Type
+
+		for key, value := range series.Metric.Labels {
+			if !containsLabel(metricLabels[key], value) {
+				metricLabels[key] = append(metricLabels[key], value)
+			}
+			if len(query.GroupBys) == 0 || containsLabel(query.GroupBys, "metric.label."+key) {
+				defaultMetricName += " " + value
+			}
+		}
+
+		for key, value := range series.Resource.Labels {
+			if !containsLabel(resourceLabels[key], value) {
+				resourceLabels[key] = append(resourceLabels[key], value)
+			}
+
+			if containsLabel(query.GroupBys, "resource.label."+key) {
+				defaultMetricName += " " + value
+			}
+		}
+
+		metricName := formatLegendKeys(series.Metric.Type, defaultMetricName, series.Metric.Labels, series.Resource.Labels, query)
+
+		queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{
+			Name:   metricName,
+			Points: points,
+		})
+	}
+
+	queryRes.Meta.Set("resourceLabels", resourceLabels)
+	queryRes.Meta.Set("metricLabels", metricLabels)
+	queryRes.Meta.Set("groupBys", query.GroupBys)
+
+	return nil
+}
+
+func containsLabel(labels []string, newLabel string) bool {
+	for _, val := range labels {
+		if val == newLabel {
+			return true
+		}
+	}
+	return false
+}
+
+func formatLegendKeys(metricType string, defaultMetricName string, metricLabels map[string]string, resourceLabels map[string]string, query *StackdriverQuery) string {
+	if query.AliasBy == "" {
+		return defaultMetricName
+	}
+
+	result := legendKeyFormat.ReplaceAllFunc([]byte(query.AliasBy), func(in []byte) []byte {
+		metaPartName := strings.Replace(string(in), "{{", "", 1)
+		metaPartName = strings.Replace(metaPartName, "}}", "", 1)
+		metaPartName = strings.TrimSpace(metaPartName)
+
+		if metaPartName == "metric.type" {
+			return []byte(metricType)
+		}
+
+		metricPart := replaceWithMetricPart(metaPartName, metricType)
+
+		if metricPart != nil {
+			return metricPart
+		}
+
+		metaPartName = strings.Replace(metaPartName, "metric.label.", "", 1)
+
+		if val, exists := metricLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
+		metaPartName = strings.Replace(metaPartName, "resource.label.", "", 1)
+
+		if val, exists := resourceLabels[metaPartName]; exists {
+			return []byte(val)
+		}
+
+		return in
+	})
+
+	return string(result)
+}
+
+func replaceWithMetricPart(metaPartName string, metricType string) []byte {
+	// https://cloud.google.com/monitoring/api/v3/metrics-details#label_names
+	shortMatches := metricNameFormat.FindStringSubmatch(metricType)
+
+	if metaPartName == "metric.name" {
+		if len(shortMatches) > 0 {
+			return []byte(shortMatches[2])
+		}
+	}
+
+	if metaPartName == "metric.service" {
+		if len(shortMatches) > 0 {
+			return []byte(shortMatches[1])
+		}
+	}
+
+	return nil
+}
+
+func (e *StackdriverExecutor) createRequest(ctx context.Context, dsInfo *models.DataSource) (*http.Request, error) {
+	u, _ := url.Parse(dsInfo.Url)
+	u.Path = path.Join(u.Path, "render")
+
+	req, err := http.NewRequest(http.MethodGet, "https://monitoring.googleapis.com/", nil)
+	if err != nil {
+		slog.Error("Failed to create request", "error", err)
+		return nil, fmt.Errorf("Failed to create request. error: %v", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("User-Agent", fmt.Sprintf("Grafana/%s", setting.BuildVersion))
+
+	// find plugin
+	plugin, ok := plugins.DataSources[dsInfo.Type]
+	if !ok {
+		return nil, errors.New("Unable to find datasource plugin Stackdriver")
+	}
+	projectName := dsInfo.JsonData.Get("defaultProject").MustString()
+	proxyPass := fmt.Sprintf("stackdriver%s", "v3/projects/"+projectName+"/timeSeries")
+
+	var stackdriverRoute *plugins.AppPluginRoute
+	for _, route := range plugin.Routes {
+		if route.Path == "stackdriver" {
+			stackdriverRoute = route
+			break
+		}
+	}
+
+	pluginproxy.ApplyRoute(ctx, req, proxyPass, stackdriverRoute, dsInfo)
+
+	return req, nil
+}

+ 357 - 0
pkg/tsdb/stackdriver/stackdriver_test.go

@@ -0,0 +1,357 @@
+package stackdriver
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"testing"
+	"time"
+
+	"github.com/grafana/grafana/pkg/components/simplejson"
+	"github.com/grafana/grafana/pkg/tsdb"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+// TestStackdriver exercises the Stackdriver executor in two areas:
+// (1) building Stackdriver API query parameters from the frontend query
+// model, and (2) parsing API time-series responses into tsdb series,
+// including label metadata and alias-by formatting.
+func TestStackdriver(t *testing.T) {
+	Convey("Stackdriver", t, func() {
+		executor := &StackdriverExecutor{}
+
+		Convey("Parse queries from frontend and build Stackdriver API queries", func() {
+			// Fixed 34-minute range; From/To are epoch-millisecond strings,
+			// as sent by the frontend.
+			fromStart := time.Date(2018, 3, 15, 13, 0, 0, 0, time.UTC).In(time.Local)
+			tsdbQuery := &tsdb.TsdbQuery{
+				TimeRange: &tsdb.TimeRange{
+					From: fmt.Sprintf("%v", fromStart.Unix()*1000),
+					To:   fmt.Sprintf("%v", fromStart.Add(34*time.Minute).Unix()*1000),
+				},
+				Queries: []*tsdb.Query{
+					{
+						Model: simplejson.NewFromAny(map[string]interface{}{
+							"metricType": "a/metric/type",
+							"view":       "FULL",
+							"aliasBy":    "testalias",
+							"type":       "timeSeriesQuery",
+						}),
+						RefId: "A",
+					},
+				},
+			}
+
+			Convey("and query has no aggregation set", func() {
+				queries, err := executor.buildQueries(tsdbQuery)
+				So(err, ShouldBeNil)
+
+				// Defaults: REDUCE_NONE reducer, ALIGN_MEAN aligner, +60s period.
+				So(len(queries), ShouldEqual, 1)
+				So(queries[0].RefID, ShouldEqual, "A")
+				So(queries[0].Target, ShouldEqual, "aggregation.alignmentPeriod=%2B60s&aggregation.crossSeriesReducer=REDUCE_NONE&aggregation.perSeriesAligner=ALIGN_MEAN&filter=metric.type%3D%22a%2Fmetric%2Ftype%22&interval.endTime=2018-03-15T13%3A34%3A00Z&interval.startTime=2018-03-15T13%3A00%3A00Z&view=FULL")
+				So(len(queries[0].Params), ShouldEqual, 7)
+				So(queries[0].Params["interval.startTime"][0], ShouldEqual, "2018-03-15T13:00:00Z")
+				So(queries[0].Params["interval.endTime"][0], ShouldEqual, "2018-03-15T13:34:00Z")
+				So(queries[0].Params["aggregation.perSeriesAligner"][0], ShouldEqual, "ALIGN_MEAN")
+				So(queries[0].Params["filter"][0], ShouldEqual, "metric.type=\"a/metric/type\"")
+				So(queries[0].Params["view"][0], ShouldEqual, "FULL")
+				So(queries[0].AliasBy, ShouldEqual, "testalias")
+			})
+
+			Convey("and query has filters", func() {
+				// Filter triples ("key", "=", "value") joined with AND are
+				// appended after the metric.type filter.
+				tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+					"metricType": "a/metric/type",
+					"filters":    []interface{}{"key", "=", "value", "AND", "key2", "=", "value2"},
+				})
+
+				queries, err := executor.buildQueries(tsdbQuery)
+				So(err, ShouldBeNil)
+				So(len(queries), ShouldEqual, 1)
+				So(queries[0].Params["filter"][0], ShouldEqual, `metric.type="a/metric/type" key="value" key2="value2"`)
+			})
+
+			// grafana-auto derives the alignment period from the panel's
+			// IntervalMs, floored at Stackdriver's 60s minimum.
+			Convey("and alignmentPeriod is set to grafana-auto", func() {
+				Convey("and IntervalMs is larger than 60000", func() {
+					tsdbQuery.Queries[0].IntervalMs = 1000000
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"alignmentPeriod": "grafana-auto",
+						"filters":         []interface{}{"key", "=", "value", "AND", "key2", "=", "value2"},
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+1000s`)
+				})
+				Convey("and IntervalMs is less than 60000", func() {
+					tsdbQuery.Queries[0].IntervalMs = 30000
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"alignmentPeriod": "grafana-auto",
+						"filters":         []interface{}{"key", "=", "value", "AND", "key2", "=", "value2"},
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+60s`)
+				})
+			})
+
+			// stackdriver-auto derives the alignment period from the length
+			// of the selected time range (wider range -> coarser period).
+			Convey("and alignmentPeriod is set to stackdriver-auto", func() {
+				Convey("and range is two hours", func() {
+					tsdbQuery.TimeRange.From = "1538033322461"
+					tsdbQuery.TimeRange.To = "1538040522461"
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"target":          "target",
+						"alignmentPeriod": "stackdriver-auto",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+60s`)
+				})
+
+				Convey("and range is 22 hours", func() {
+					tsdbQuery.TimeRange.From = "1538034524922"
+					tsdbQuery.TimeRange.To = "1538113724922"
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"target":          "target",
+						"alignmentPeriod": "stackdriver-auto",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+60s`)
+				})
+
+				// 23 hours is the first boundary where the period coarsens to 5m.
+				Convey("and range is 23 hours", func() {
+					tsdbQuery.TimeRange.From = "1538034567985"
+					tsdbQuery.TimeRange.To = "1538117367985"
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"target":          "target",
+						"alignmentPeriod": "stackdriver-auto",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+300s`)
+				})
+
+				Convey("and range is 7 days", func() {
+					tsdbQuery.TimeRange.From = "1538036324073"
+					tsdbQuery.TimeRange.To = "1538641124073"
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"target":          "target",
+						"alignmentPeriod": "stackdriver-auto",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+3600s`)
+				})
+			})
+
+			Convey("and alignmentPeriod is set in frontend", func() {
+				// Explicit periods are clamped to a maximum of +3600s.
+				Convey("and alignment period is too big", func() {
+					tsdbQuery.Queries[0].IntervalMs = 1000
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"alignmentPeriod": "+360000s",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+3600s`)
+				})
+
+				Convey("and alignment period is within accepted range", func() {
+					tsdbQuery.Queries[0].IntervalMs = 1000
+					tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+						"alignmentPeriod": "+600s",
+					})
+
+					queries, err := executor.buildQueries(tsdbQuery)
+					So(err, ShouldBeNil)
+					So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, `+600s`)
+				})
+			})
+
+			Convey("and query has aggregation mean set", func() {
+				tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+					"metricType":         "a/metric/type",
+					"primaryAggregation": "REDUCE_MEAN",
+					"view":               "FULL",
+				})
+
+				queries, err := executor.buildQueries(tsdbQuery)
+				So(err, ShouldBeNil)
+
+				So(len(queries), ShouldEqual, 1)
+				So(queries[0].RefID, ShouldEqual, "A")
+				So(queries[0].Target, ShouldEqual, "aggregation.alignmentPeriod=%2B60s&aggregation.crossSeriesReducer=REDUCE_MEAN&aggregation.perSeriesAligner=ALIGN_MEAN&filter=metric.type%3D%22a%2Fmetric%2Ftype%22&interval.endTime=2018-03-15T13%3A34%3A00Z&interval.startTime=2018-03-15T13%3A00%3A00Z&view=FULL")
+				So(len(queries[0].Params), ShouldEqual, 7)
+				So(queries[0].Params["interval.startTime"][0], ShouldEqual, "2018-03-15T13:00:00Z")
+				So(queries[0].Params["interval.endTime"][0], ShouldEqual, "2018-03-15T13:34:00Z")
+				So(queries[0].Params["aggregation.crossSeriesReducer"][0], ShouldEqual, "REDUCE_MEAN")
+				So(queries[0].Params["aggregation.perSeriesAligner"][0], ShouldEqual, "ALIGN_MEAN")
+				So(queries[0].Params["aggregation.alignmentPeriod"][0], ShouldEqual, "+60s")
+				So(queries[0].Params["filter"][0], ShouldEqual, "metric.type=\"a/metric/type\"")
+				So(queries[0].Params["view"][0], ShouldEqual, "FULL")
+			})
+
+			Convey("and query has group bys", func() {
+				// Each group-by becomes its own aggregation.groupByFields param.
+				tsdbQuery.Queries[0].Model = simplejson.NewFromAny(map[string]interface{}{
+					"metricType":         "a/metric/type",
+					"primaryAggregation": "REDUCE_NONE",
+					"groupBys":           []interface{}{"metric.label.group1", "metric.label.group2"},
+					"view":               "FULL",
+				})
+
+				queries, err := executor.buildQueries(tsdbQuery)
+				So(err, ShouldBeNil)
+
+				So(len(queries), ShouldEqual, 1)
+				So(queries[0].RefID, ShouldEqual, "A")
+				So(queries[0].Target, ShouldEqual, "aggregation.alignmentPeriod=%2B60s&aggregation.crossSeriesReducer=REDUCE_NONE&aggregation.groupByFields=metric.label.group1&aggregation.groupByFields=metric.label.group2&aggregation.perSeriesAligner=ALIGN_MEAN&filter=metric.type%3D%22a%2Fmetric%2Ftype%22&interval.endTime=2018-03-15T13%3A34%3A00Z&interval.startTime=2018-03-15T13%3A00%3A00Z&view=FULL")
+				So(len(queries[0].Params), ShouldEqual, 8)
+				So(queries[0].Params["interval.startTime"][0], ShouldEqual, "2018-03-15T13:00:00Z")
+				So(queries[0].Params["interval.endTime"][0], ShouldEqual, "2018-03-15T13:34:00Z")
+				So(queries[0].Params["aggregation.perSeriesAligner"][0], ShouldEqual, "ALIGN_MEAN")
+				So(queries[0].Params["aggregation.groupByFields"][0], ShouldEqual, "metric.label.group1")
+				So(queries[0].Params["aggregation.groupByFields"][1], ShouldEqual, "metric.label.group2")
+				So(queries[0].Params["filter"][0], ShouldEqual, "metric.type=\"a/metric/type\"")
+				So(queries[0].Params["view"][0], ShouldEqual, "FULL")
+			})
+
+		})
+
+		Convey("Parse stackdriver response in the time series format", func() {
+			Convey("when data from query aggregated to one time series", func() {
+				data, err := loadTestFile("./test-data/1-series-response-agg-one-metric.json")
+				So(err, ShouldBeNil)
+				So(len(data.TimeSeries), ShouldEqual, 1)
+
+				res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
+				query := &StackdriverQuery{}
+				err = executor.parseResponse(res, data, query)
+				So(err, ShouldBeNil)
+
+				So(len(res.Series), ShouldEqual, 1)
+				So(res.Series[0].Name, ShouldEqual, "serviceruntime.googleapis.com/api/request_count")
+				So(len(res.Series[0].Points), ShouldEqual, 3)
+
+				// The fixture stores points newest-first; the parser must
+				// reverse them into ascending time order.
+				Convey("timestamps should be in ascending order", func() {
+					So(res.Series[0].Points[0][0].Float64, ShouldEqual, 0.05)
+					So(res.Series[0].Points[0][1].Float64, ShouldEqual, 1536670020000)
+
+					So(res.Series[0].Points[1][0].Float64, ShouldEqual, 1.05)
+					So(res.Series[0].Points[1][1].Float64, ShouldEqual, 1536670080000)
+
+					So(res.Series[0].Points[2][0].Float64, ShouldEqual, 1.0666666666667)
+					So(res.Series[0].Points[2][1].Float64, ShouldEqual, 1536670260000)
+				})
+			})
+
+			Convey("when data from query with no aggregation", func() {
+				data, err := loadTestFile("./test-data/2-series-response-no-agg.json")
+				So(err, ShouldBeNil)
+				So(len(data.TimeSeries), ShouldEqual, 3)
+
+				res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
+				query := &StackdriverQuery{}
+				err = executor.parseResponse(res, data, query)
+				So(err, ShouldBeNil)
+
+				Convey("Should add labels to metric name", func() {
+					So(len(res.Series), ShouldEqual, 3)
+					So(res.Series[0].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-asia-east-1")
+					So(res.Series[1].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-europe-west-1")
+					So(res.Series[2].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-us-east-1")
+				})
+
+				Convey("Should parse to time series", func() {
+					So(len(res.Series[0].Points), ShouldEqual, 3)
+					So(res.Series[0].Points[0][0].Float64, ShouldEqual, 9.8566497180145)
+					So(res.Series[0].Points[1][0].Float64, ShouldEqual, 9.7323568146676)
+					So(res.Series[0].Points[2][0].Float64, ShouldEqual, 9.7730520330369)
+				})
+
+				// Label metadata is collected into Meta as sorted, de-duplicated
+				// value lists per label key (project_id appears once across all
+				// three series, hence length 1).
+				Convey("Should add meta for labels to the response", func() {
+					metricLabels := res.Meta.Get("metricLabels").Interface().(map[string][]string)
+					So(metricLabels, ShouldNotBeNil)
+					So(len(metricLabels["instance_name"]), ShouldEqual, 3)
+					So(metricLabels["instance_name"][0], ShouldEqual, "collector-asia-east-1")
+					So(metricLabels["instance_name"][1], ShouldEqual, "collector-europe-west-1")
+					So(metricLabels["instance_name"][2], ShouldEqual, "collector-us-east-1")
+
+					resourceLabels := res.Meta.Get("resourceLabels").Interface().(map[string][]string)
+					So(resourceLabels, ShouldNotBeNil)
+					So(len(resourceLabels["zone"]), ShouldEqual, 3)
+					So(resourceLabels["zone"][0], ShouldEqual, "asia-east1-a")
+					So(resourceLabels["zone"][1], ShouldEqual, "europe-west1-b")
+					So(resourceLabels["zone"][2], ShouldEqual, "us-east1-b")
+
+					So(len(resourceLabels["project_id"]), ShouldEqual, 1)
+					So(resourceLabels["project_id"][0], ShouldEqual, "grafana-prod")
+				})
+			})
+
+			Convey("when data from query with no aggregation and group bys", func() {
+				data, err := loadTestFile("./test-data/2-series-response-no-agg.json")
+				So(err, ShouldBeNil)
+				So(len(data.TimeSeries), ShouldEqual, 3)
+
+				res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
+				query := &StackdriverQuery{GroupBys: []string{"metric.label.instance_name", "resource.label.zone"}}
+				err = executor.parseResponse(res, data, query)
+				So(err, ShouldBeNil)
+
+				Convey("Should add instance name and zone labels to metric name", func() {
+					So(len(res.Series), ShouldEqual, 3)
+					So(res.Series[0].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-asia-east-1 asia-east1-a")
+					So(res.Series[1].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-europe-west-1 europe-west1-b")
+					So(res.Series[2].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time collector-us-east-1 us-east1-b")
+				})
+			})
+
+			Convey("when data from query with no aggregation and alias by", func() {
+				data, err := loadTestFile("./test-data/2-series-response-no-agg.json")
+				So(err, ShouldBeNil)
+				So(len(data.TimeSeries), ShouldEqual, 3)
+
+				res := &tsdb.QueryResult{Meta: simplejson.New(), RefId: "A"}
+
+				Convey("and the alias pattern is for metric type, a metric label and a resource label", func() {
+
+					query := &StackdriverQuery{AliasBy: "{{metric.type}} - {{metric.label.instance_name}} - {{resource.label.zone}}", GroupBys: []string{"metric.label.instance_name", "resource.label.zone"}}
+					err = executor.parseResponse(res, data, query)
+					So(err, ShouldBeNil)
+
+					// NOTE(review): description is stale — the alias includes
+					// the metric type and zone as well, not only instance name.
+					Convey("Should use alias by formatting and only show instance name", func() {
+						So(len(res.Series), ShouldEqual, 3)
+						So(res.Series[0].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time - collector-asia-east-1 - asia-east1-a")
+						So(res.Series[1].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time - collector-europe-west-1 - europe-west1-b")
+						So(res.Series[2].Name, ShouldEqual, "compute.googleapis.com/instance/cpu/usage_time - collector-us-east-1 - us-east1-b")
+					})
+				})
+
+				Convey("and the alias pattern is for metric name", func() {
+
+					query := &StackdriverQuery{AliasBy: "metric {{metric.name}} service {{metric.service}}", GroupBys: []string{"metric.label.instance_name", "resource.label.zone"}}
+					err = executor.parseResponse(res, data, query)
+					So(err, ShouldBeNil)
+
+					// NOTE(review): description copy/pasted from the previous
+					// case — this block actually checks metric name/service.
+					Convey("Should use alias by formatting and only show instance name", func() {
+						So(len(res.Series), ShouldEqual, 3)
+						So(res.Series[0].Name, ShouldEqual, "metric instance/cpu/usage_time service compute")
+						So(res.Series[1].Name, ShouldEqual, "metric instance/cpu/usage_time service compute")
+						So(res.Series[2].Name, ShouldEqual, "metric instance/cpu/usage_time service compute")
+					})
+				})
+			})
+		})
+	})
+}
+
+// loadTestFile reads a JSON fixture from disk and unmarshals it into a
+// StackdriverResponse. On failure the zero-value response is returned
+// together with the read or unmarshal error.
+func loadTestFile(path string) (StackdriverResponse, error) {
+	var data StackdriverResponse
+
+	jsonBody, err := ioutil.ReadFile(path)
+	if err != nil {
+		return data, err
+	}
+	err = json.Unmarshal(jsonBody, &data)
+	return data, err
+}

+ 46 - 0
pkg/tsdb/stackdriver/test-data/1-series-response-agg-one-metric.json

@@ -0,0 +1,46 @@
+{
+  "timeSeries": [
+    {
+      "metric": {
+        "type": "serviceruntime.googleapis.com\/api\/request_count"
+      },
+      "resource": {
+        "type": "consumed_api",
+        "labels": {
+          "project_id": "grafana-prod"
+        }
+      },
+      "metricKind": "GAUGE",
+      "valueType": "DOUBLE",
+      "points": [
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:51:00Z",
+            "endTime": "2018-09-11T12:51:00Z"
+          },
+          "value": {
+            "doubleValue": 1.0666666666667
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:48:00Z",
+            "endTime": "2018-09-11T12:48:00Z"
+          },
+          "value": {
+            "doubleValue": 1.05
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:47:00Z",
+            "endTime": "2018-09-11T12:47:00Z"
+          },
+          "value": {
+            "doubleValue": 0.05
+          }
+        }
+      ]
+    }
+  ]
+}

+ 145 - 0
pkg/tsdb/stackdriver/test-data/2-series-response-no-agg.json

@@ -0,0 +1,145 @@
+{
+  "timeSeries": [
+    {
+      "metric": {
+        "labels": {
+          "instance_name": "collector-asia-east-1"
+        },
+        "type": "compute.googleapis.com\/instance\/cpu\/usage_time"
+      },
+      "resource": {
+        "type": "gce_instance",
+        "labels": {
+          "instance_id": "1119268429530133111",
+          "zone": "asia-east1-a",
+          "project_id": "grafana-prod"
+        }
+      },
+      "metricKind": "DELTA",
+      "valueType": "DOUBLE",
+      "points": [
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:30:00Z",
+            "endTime": "2018-09-11T12:31:00Z"
+          },
+          "value": {
+            "doubleValue": 9.7730520330369
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:29:00Z",
+            "endTime": "2018-09-11T12:30:00Z"
+          },
+          "value": {
+            "doubleValue": 9.7323568146676
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:28:00Z",
+            "endTime": "2018-09-11T12:29:00Z"
+          },
+          "value": {
+            "doubleValue": 9.8566497180145
+          }
+        }
+      ]
+    },
+    {
+      "metric": {
+        "labels": {
+          "instance_name": "collector-europe-west-1"
+        },
+        "type": "compute.googleapis.com\/instance\/cpu\/usage_time"
+      },
+      "resource": {
+        "type": "gce_instance",
+        "labels": {
+          "instance_id": "22241654114540837222",
+          "zone": "europe-west1-b",
+          "project_id": "grafana-prod"
+        }
+      },
+      "metricKind": "DELTA",
+      "valueType": "DOUBLE",
+      "points": [
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:30:00Z",
+            "endTime": "2018-09-11T12:31:00Z"
+          },
+          "value": {
+            "doubleValue": 8.8210971239023
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:29:00Z",
+            "endTime": "2018-09-11T12:30:00Z"
+          },
+          "value": {
+            "doubleValue": 8.9689492364414
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:28:00Z",
+            "endTime": "2018-09-11T12:29:00Z"
+          },
+          "value": {
+            "doubleValue": 9.0238475054502
+          }
+        }
+      ]
+    },
+    {
+      "metric": {
+        "labels": {
+          "instance_name": "collector-us-east-1"
+        },
+        "type": "compute.googleapis.com\/instance\/cpu\/usage_time"
+      },
+      "resource": {
+        "type": "gce_instance",
+        "labels": {
+          "instance_id": "3332264424035095333",
+          "zone": "us-east1-b",
+          "project_id": "grafana-prod"
+        }
+      },
+      "metricKind": "DELTA",
+      "valueType": "DOUBLE",
+      "points": [
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:30:00Z",
+            "endTime": "2018-09-11T12:31:00Z"
+          },
+          "value": {
+            "doubleValue": 30.807846801355
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:29:00Z",
+            "endTime": "2018-09-11T12:30:00Z"
+          },
+          "value": {
+            "doubleValue": 30.903974115849
+          }
+        },
+        {
+          "interval": {
+            "startTime": "2018-09-11T12:28:00Z",
+            "endTime": "2018-09-11T12:29:00Z"
+          },
+          "value": {
+            "doubleValue": 30.829426143318
+          }
+        }
+      ]
+    }
+  ]
+}

+ 43 - 0
pkg/tsdb/stackdriver/types.go

@@ -0,0 +1,43 @@
+package stackdriver
+
+import (
+	"net/url"
+	"time"
+)
+
+// StackdriverQuery is the query that Grafana sends from the frontend
+type StackdriverQuery struct {
+	// Target holds the full URL-encoded query string built from Params
+	// (see assertions in stackdriver_test.go).
+	Target string
+	// Params are the individual Stackdriver API request parameters
+	// (filter, interval.*, aggregation.*, view, ...).
+	Params url.Values
+	// RefID ties results back to the originating panel query ("A", "B", ...).
+	RefID string
+	// GroupBys lists aggregation.groupByFields entries; also used when
+	// composing series names from labels.
+	GroupBys []string
+	// AliasBy is the series-name template with {{...}} placeholders.
+	AliasBy string
+}
+
+// StackdriverResponse is the data returned from the external Google Stackdriver API
+// (timeSeries.list). Only the fields this datasource consumes are mapped.
+type StackdriverResponse struct {
+	TimeSeries []struct {
+		Metric struct {
+			Labels map[string]string `json:"labels"`
+			Type   string            `json:"type"`
+		} `json:"metric"`
+		Resource struct {
+			Type   string            `json:"type"`
+			Labels map[string]string `json:"labels"`
+		} `json:"resource"`
+		MetricKind string `json:"metricKind"`
+		ValueType  string `json:"valueType"`
+		Points     []struct {
+			Interval struct {
+				StartTime time.Time `json:"startTime"`
+				EndTime   time.Time `json:"endTime"`
+			} `json:"interval"`
+			// Exactly one value field is populated, depending on ValueType.
+			// int64Value arrives as a JSON string, hence the string type.
+			Value struct {
+				DoubleValue float64 `json:"doubleValue"`
+				StringValue string  `json:"stringValue"`
+				BoolValue   bool    `json:"boolValue"`
+				IntValue    string  `json:"int64Value"`
+			} `json:"value"`
+		} `json:"points"`
+	} `json:"timeSeries"`
+}

+ 39 - 0
public/app/core/components/LayoutSelector/LayoutSelector.tsx

@@ -0,0 +1,39 @@
+import React, { SFC } from 'react';
+
+/** Union of the two supported layout modes. */
+export type LayoutMode = LayoutModes.Grid | LayoutModes.List;
+
+export enum LayoutModes {
+  Grid = 'grid',
+  List = 'list',
+}
+
+interface Props {
+  /** Currently active layout mode; drives the 'active' button styling. */
+  mode: LayoutMode;
+  /**
+   * Called when the user clicks the other mode's button.
+   * NOTE(review): return type '{}' should almost certainly be 'void' —
+   * the handlers below return nothing.
+   */
+  onLayoutModeChanged: (mode: LayoutMode) => {};
+}
+
+/**
+ * Stateless two-button toggle between list and grid layouts.
+ * Highlights whichever button matches the current mode.
+ */
+const LayoutSelector: SFC<Props> = props => {
+  const { mode, onLayoutModeChanged } = props;
+  return (
+    <div className="layout-selector">
+      <button
+        onClick={() => {
+          onLayoutModeChanged(LayoutModes.List);
+        }}
+        className={mode === LayoutModes.List ? 'active' : ''}
+      >
+        <i className="fa fa-list" />
+      </button>
+      <button
+        onClick={() => {
+          onLayoutModeChanged(LayoutModes.Grid);
+        }}
+        className={mode === LayoutModes.Grid ? 'active' : ''}
+      >
+        <i className="fa fa-th" />
+      </button>
+    </div>
+  );
+};
+
+export default LayoutSelector;

部分文件因为文件数量过多而无法显示