
Merge branch 'master' of github.com:grafana/grafana

Torkel Ödegaard 8 years ago
commit a081f90403
100 changed files with 6309 additions and 3005 deletions
  1. CHANGELOG.md (+19 -2)
  2. conf/defaults.ini (+5 -1)
  3. conf/sample.ini (+5 -1)
  4. docs/sources/administration/metrics.md (+15 -0)
  5. docs/sources/installation/configuration.md (+12 -1)
  6. docs/sources/installation/debian.md (+5 -3)
  7. docs/sources/installation/rpm.md (+8 -7)
  8. docs/sources/installation/upgrading.md (+3 -3)
  9. docs/sources/installation/windows.md (+1 -1)
  10. packaging/publish/publish_both.sh (+1 -1)
  11. pkg/api/admin_users.go (+1 -1)
  12. pkg/api/api.go (+142 -141)
  13. pkg/api/cloudwatch/cloudwatch.go (+2 -2)
  14. pkg/api/cloudwatch/metrics.go (+1 -1)
  15. pkg/api/dashboard_snapshot.go (+3 -3)
  16. pkg/api/http_server.go (+11 -1)
  17. pkg/api/login.go (+1 -1)
  18. pkg/api/login_oauth.go (+1 -1)
  19. pkg/api/metrics.go (+0 -55)
  20. pkg/api/org.go (+1 -1)
  21. pkg/api/org_invite.go (+2 -2)
  22. pkg/api/route_register.go (+115 -0)
  23. pkg/api/route_register_test.go (+185 -0)
  24. pkg/api/signup.go (+2 -2)
  25. pkg/cmd/grafana-server/server.go (+1 -1)
  26. pkg/components/imguploader/gcsuploader.go (+88 -0)
  27. pkg/components/imguploader/gcsuploader_test.go (+24 -0)
  28. pkg/components/imguploader/imguploader.go (+13 -2)
  29. pkg/components/imguploader/imguploader_test.go (+23 -0)
  30. pkg/components/imguploader/s3uploader.go (+2 -1)
  31. pkg/components/imguploader/s3uploader_test.go (+2 -1)
  32. pkg/components/imguploader/webdavuploader.go (+2 -1)
  33. pkg/components/imguploader/webdavuploader_test.go (+3 -2)
  34. pkg/metrics/EMWA.go (+0 -122)
  35. pkg/metrics/combos.go (+0 -46)
  36. pkg/metrics/common.go (+0 -61)
  37. pkg/metrics/counter.go (+0 -61)
  38. pkg/metrics/delta.go (+0 -11)
  39. pkg/metrics/gauge.go (+0 -83)
  40. pkg/metrics/graphite.go (+0 -107)
  41. pkg/metrics/graphite_test.go (+0 -77)
  42. pkg/metrics/graphitebridge/graphite.go (+396 -0)
  43. pkg/metrics/graphitebridge/graphite_test.go (+503 -0)
  44. pkg/metrics/histogram.go (+0 -189)
  45. pkg/metrics/histogram_test.go (+0 -90)
  46. pkg/metrics/init.go (+38 -0)
  47. pkg/metrics/meter.go (+0 -221)
  48. pkg/metrics/metrics.go (+383 -131)
  49. pkg/metrics/publish.go (+0 -135)
  50. pkg/metrics/registry.go (+0 -37)
  51. pkg/metrics/sample.go (+0 -607)
  52. pkg/metrics/sample_test.go (+0 -367)
  53. pkg/metrics/settings.go (+53 -17)
  54. pkg/metrics/timer.go (+0 -310)
  55. pkg/middleware/logger.go (+3 -3)
  56. pkg/middleware/middleware.go (+2 -2)
  57. pkg/middleware/request_metrics.go (+149 -13)
  58. pkg/models/datasource.go (+25 -13)
  59. pkg/services/alerting/eval_handler.go (+2 -2)
  60. pkg/services/alerting/notifier.go (+4 -1)
  61. pkg/services/alerting/notifiers/dingding.go (+0 -2)
  62. pkg/services/alerting/notifiers/email.go (+0 -2)
  63. pkg/services/alerting/notifiers/line.go (+2 -3)
  64. pkg/services/alerting/notifiers/opsgenie.go (+0 -2)
  65. pkg/services/alerting/notifiers/pagerduty.go (+0 -2)
  66. pkg/services/alerting/notifiers/pushover.go (+0 -2)
  67. pkg/services/alerting/notifiers/sensu.go (+0 -2)
  68. pkg/services/alerting/notifiers/slack.go (+0 -2)
  69. pkg/services/alerting/notifiers/telegram.go (+0 -2)
  70. pkg/services/alerting/notifiers/threema.go (+0 -2)
  71. pkg/services/alerting/notifiers/victorops.go (+0 -2)
  72. pkg/services/alerting/notifiers/webhook.go (+0 -2)
  73. pkg/services/alerting/reader.go (+1 -1)
  74. pkg/services/alerting/result_handler.go (+1 -16)
  75. pkg/services/sqlstore/dashboard.go (+1 -1)
  76. pkg/services/sqlstore/datasource.go (+1 -1)
  77. pkg/tsdb/prometheus/prometheus.go (+17 -14)
  78. public/app/core/utils/kbn.js (+7 -0)
  79. public/app/features/dashboard/model.ts (+3 -0)
  80. public/app/plugins/datasource/graphite/query_ctrl.ts (+0 -1)
  81. public/app/plugins/datasource/mysql/mode-sql.js (+106 -0)
  82. public/app/plugins/datasource/mysql/partials/query.editor.html (+1 -1)
  83. vendor/cloud.google.com/go/LICENSE (+202 -0)
  84. vendor/cloud.google.com/go/compute/metadata/metadata.go (+437 -0)
  85. vendor/cloud.google.com/go/iam/iam.go (+256 -0)
  86. vendor/cloud.google.com/go/internal/optional/optional.go (+94 -0)
  87. vendor/cloud.google.com/go/internal/retry.go (+56 -0)
  88. vendor/cloud.google.com/go/internal/version/update_version.sh (+6 -0)
  89. vendor/cloud.google.com/go/internal/version/version.go (+71 -0)
  90. vendor/cloud.google.com/go/storage/acl.go (+252 -0)
  91. vendor/cloud.google.com/go/storage/bucket.go (+590 -0)
  92. vendor/cloud.google.com/go/storage/copy.go (+201 -0)
  93. vendor/cloud.google.com/go/storage/doc.go (+161 -0)
  94. vendor/cloud.google.com/go/storage/go17.go (+26 -0)
  95. vendor/cloud.google.com/go/storage/iam.go (+108 -0)
  96. vendor/cloud.google.com/go/storage/invoke.go (+43 -0)
  97. vendor/cloud.google.com/go/storage/not_go17.go (+26 -0)
  98. vendor/cloud.google.com/go/storage/reader.go (+74 -0)
  99. vendor/cloud.google.com/go/storage/storage.go (+1117 -0)
  100. vendor/cloud.google.com/go/storage/writer.go (+192 -0)

+ 19 - 2
CHANGELOG.md

@@ -7,11 +7,28 @@
 - UX changes to nav & side menu
 - New dashboard grid layout system

-# 4.5.0 (unreleased)
+# 4.6.0 (unreleased)
 
 
-## Enhancements
+## New Features
+* **GCS**: Adds support for Google Cloud Storage [#8370](https://github.com/grafana/grafana/issues/8370) thx [@chuhlomin](https://github.com/chuhlomin)
+* **Prometheus**: Adds /metrics endpoint for exposing Grafana metrics. [#9187](https://github.com/grafana/grafana/pull/9187)
+* **Graph**: Add support for local formating in axis. [#1395](https://github.com/grafana/grafana/issues/1395), thx [@m0nhawk](https://github.com/m0nhawk)
+
+## Breaking changes
+* **Metrics**: The metric structure for internal metrics about Grafana published to graphite has changed. This might break dashboards for internal metrics. 
+
+# 4.5.1 (2017-09-15)
+
+## Fixes
+* **MySQL**: Fixed issue with query editor not showing [#9247](https://github.com/grafana/grafana/issues/9247)
+
+# 4.5.0 (2017-09-14)
 
 
+## Fixes & Enhancements since beta1
+* **Security**: Security fix for api vulnerability (in multiple org setups).
 * **Shortcuts**: Adds shortcut for creating new dashboard [#8876](https://github.com/grafana/grafana/pull/8876) thx [@mtanda](https://github.com/mtanda)
+* **Graph**: Right Y-Axis label position fixed [#9172](https://github.com/grafana/grafana/pull/9172)
+* **General**: Improve rounding of time intervals [#9197](https://github.com/grafana/grafana/pull/9197), thx [@alin-amana](https://github.com/alin-amana)
 
 
 # 4.5.0-beta1 (2017-09-05)


+ 5 - 1
conf/defaults.ini

@@ -454,7 +454,7 @@ url = https://grafana.com
 
 
 #################################### External Image Storage ##############
 [external_image_storage]
-# You can choose between (s3, webdav)
+# You can choose between (s3, webdav, gcs)
 provider =

 [external_image_storage.s3]
@@ -467,3 +467,7 @@ url =
 username =
 password =
 public_url =
+
+[external_image_storage.gcs]
+key_file =
+bucket =

+ 5 - 1
conf/sample.ini

@@ -399,7 +399,7 @@
 #################################### External image storage ##########################
 [external_image_storage]
 # Used for uploading images to public servers so they can be included in slack/email messages.
-# you can choose between (s3, webdav)
+# you can choose between (s3, webdav, gcs)
 ;provider =

 [external_image_storage.s3]
@@ -412,3 +412,7 @@
 ;public_url =
 ;username =
 ;password =
+
+[external_image_storage.gcs]
+;key_file =
+;bucket =

+ 15 - 0
docs/sources/administration/metrics.md

@@ -0,0 +1,15 @@
++++
+title = "Internal metrics"
+description = "Internal metrics exposed by Grafana"
+keywords = ["grafana", "metrics", "internal metrics"]
+type = "docs"
+[menu.docs]
+parent = "admin"
+weight = 8
++++
+
+# Internal metrics
+
+Grafana collects some metrics about itself internally. Currently Grafana supports pushing metrics to graphite and exposing them to be scraped by Prometheus.
+
+To enable internal metrics you have to enable them under the [metrics] section in your [grafana.ini](http://docs.grafana.org/installation/configuration/#enabled-6) config file. If you want to push metrics to graphite you also have to configure the [metrics.graphite](http://docs.grafana.org/installation/configuration/#metrics-graphite) section.
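
A minimal grafana.ini sketch for the setup described above might look like the following. Only the `enabled` key and the `[metrics.graphite]` section are referenced in this change; the remaining keys and values are assumptions shown for illustration:

```ini
# sketch only – enable collection of Grafana's internal metrics
[metrics]
enabled = true
# collection/push interval in seconds (assumed key)
interval_seconds = 10

# only needed when also pushing metrics to graphite (assumed keys and values)
[metrics.graphite]
address = localhost:2003
prefix = prod.grafana.%(instance_name)s.
```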

+ 12 - 1
docs/sources/installation/configuration.md

@@ -645,7 +645,7 @@ Time to live for snapshots.
 These options control how images should be made public so they can be shared on services like slack.

 ### provider
-You can choose between (s3, webdav). If left empty Grafana will ignore the upload action.
+You can choose between (s3, webdav, gcs). If left empty Grafana will ignore the upload action.
 
 
 ## [external_image_storage.s3]

@@ -677,6 +677,17 @@ basic auth username
 ### password
 basic auth password

+## [external_image_storage.gcs]
+
+### key_file
+Path to JSON key file associated with a Google service account to authenticate and authorize.
+Service Account keys can be created and downloaded from https://console.developers.google.com/permissions/serviceaccounts.
+
+Service Account should have "Storage Object Writer" role.
+
+### bucket name
+Bucket Name on Google Cloud Storage. 
+
 ## [alerting]

 ### enabled
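
Putting the documented keys together, a minimal custom.ini sketch for GCS image uploads could look like this; the key file path and bucket name are placeholders:

```ini
[external_image_storage]
# choose between s3, webdav and gcs
provider = gcs

[external_image_storage.gcs]
# JSON key for a service account with the "Storage Object Writer" role (placeholder path)
key_file = /etc/grafana/gcs-service-account.json
# Google Cloud Storage bucket to upload rendered images to (placeholder name)
bucket = my-grafana-images
```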

+ 5 - 3
docs/sources/installation/debian.md

@@ -15,7 +15,9 @@ weight = 1
 
 
 Description | Download
 ------------ | -------------
-Stable for Debian-based Linux | [grafana_4.5.0_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0_amd64.deb)
+Stable for Debian-based Linux | [grafana_4.5.1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.1_amd64.deb)
+
+<!-- Beta for Debian-based Linux | [grafana_4.5.0-beta1_amd64.deb](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0-beta1_amd64.deb) -->
 
 
 Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
 installation.
@@ -24,9 +26,9 @@ installation.
 
 
 
 
 ```bash
-wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.0_amd64.deb
+wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_4.5.1_amd64.deb
 sudo apt-get install -y adduser libfontconfig
-sudo dpkg -i grafana_4.4.3_amd64.deb
+sudo dpkg -i grafana_4.5.1_amd64.deb
 ```

 <!--

+ 8 - 7
docs/sources/installation/rpm.md

@@ -15,7 +15,9 @@ weight = 2
 
 
 Description | Download
 ------------ | -------------
-Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-1.x86_64.rpm)
+Stable for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm)
+
+<!-- Latest Beta for CentOS / Fedora / OpenSuse / Redhat Linux | [4.5.0-beta1 (x86-64 rpm)](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-beta1.x86_64.rpm) -->
 
 
 Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
 installation.
@@ -24,19 +26,19 @@ installation.
 
 
 You can install Grafana using Yum directly.

-    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-1.x86_64.rpm
+    $ sudo yum install https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm
 
 
 Or install manually using `rpm`.

 #### On CentOS / Fedora / Redhat:

-    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0-1.x86_64.rpm
+    $ wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1-1.x86_64.rpm
     $ sudo yum install initscripts fontconfig
-    $ sudo rpm -Uvh grafana-4.5.0-1.x86_64.rpm
+    $ sudo rpm -Uvh grafana-4.5.1-1.x86_64.rpm
 
 
 #### On OpenSuse:

-    $ sudo rpm -i --nodeps grafana-4.5.0-1.x86_64.rpm
+    $ sudo rpm -i --nodeps grafana-4.5.1-1.x86_64.rpm
 
 
 ## Install via YUM Repository

@@ -52,8 +54,7 @@ Add the following to a new file at `/etc/yum.repos.d/grafana.repo`
     sslverify=1
     sslcacert=/etc/pki/tls/certs/ca-bundle.crt

-There is also a testing repository if you want beta or release
-candidates.
+There is also a testing repository if you want beta or release candidates.
 
 
     baseurl=https://packagecloud.io/grafana/testing/el/6/$basearch


+ 3 - 3
docs/sources/installation/upgrading.md

@@ -94,10 +94,10 @@ to the same location (and overwrite the existing files). This might overwrite yo
 recommend you place your config changes in a file named  `<grafana_install_dir>/conf/custom.ini`
 as this will make upgrades easier without risking losing your config changes.

-## Upgrading form 1.x
+## Upgrading from 1.x
 
 
 [Migrating from 1.x to 2.x]({{< relref "installation/migrating_to2.md" >}})

-## Upgrading form 2.x
+## Upgrading from 2.x
 
 
-We are not aware of any issues upgrading directly from 2.x to 4.x but to on the safe side go via 3.x.
+We are not aware of any issues upgrading directly from 2.x to 4.x but to be on the safe side go via 3.x => 4.x.

+ 1 - 1
docs/sources/installation/windows.md

@@ -13,7 +13,7 @@ weight = 3
 
 
 Description | Download
 ------------ | -------------
-Latest stable package for Windows | [grafana.4.5.0.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.0.windows-x64.zip)
+Latest stable package for Windows | [grafana.4.5.1.windows-x64.zip](https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-4.5.1.windows-x64.zip)
 
 
 Read [Upgrading Grafana]({{< relref "installation/upgrading.md" >}}) for tips and guidance on updating an existing
 installation.

+ 1 - 1
packaging/publish/publish_both.sh

@@ -1,5 +1,5 @@
 #! /usr/bin/env bash
-version=4.4.2
+version=4.5.1
 
 
 wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${version}_amd64.deb


+ 1 - 1
pkg/api/admin_users.go

@@ -35,7 +35,7 @@ func AdminCreateUser(c *middleware.Context, form dtos.AdminCreateUserForm) {
 		return
 	}

-	metrics.M_Api_Admin_User_Create.Inc(1)
+	metrics.M_Api_Admin_User_Create.Inc()
 
 
 	user := cmd.Result


+ 142 - 141
pkg/api/api.go

@@ -10,7 +10,7 @@ import (
 
 
 // Register adds http routes
 func (hs *HttpServer) registerRoutes() {
-	r := hs.macaron
+	macaronR := hs.macaron
 	reqSignedIn := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true})
 	reqGrafanaAdmin := middleware.Auth(&middleware.AuthOptions{ReqSignedIn: true, ReqGrafanaAdmin: true})
 	reqEditorRole := middleware.RoleAuth(m.ROLE_EDITOR, m.ROLE_ADMIN)
@@ -19,7 +19,9 @@ func (hs *HttpServer) registerRoutes() {
 	bind := binding.Bind

 	// automatically set HEAD for every GET
-	r.SetAutoHead(true)
+	macaronR.SetAutoHead(true)
+
+	r := newRouteRegister(middleware.RequestMetrics)
 
 
 	// not logged in views
 	r.Get("/", reqSignedIn, Index)
@@ -98,198 +100,195 @@ func (hs *HttpServer) registerRoutes() {
 	r.Get("/api/login/ping", quota("session"), LoginApiPing)

 	// authed api
-	r.Group("/api", func() {
+	r.Group("/api", func(apiRoute RouteRegister) {
 
 
 		// user (signed in)
-		r.Group("/user", func() {
-			r.Get("/", wrap(GetSignedInUser))
-			r.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
-			r.Post("/using/:id", wrap(UserSetUsingOrg))
-			r.Get("/orgs", wrap(GetSignedInUserOrgList))
-
-			r.Post("/stars/dashboard/:id", wrap(StarDashboard))
-			r.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
-
-			r.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
-			r.Get("/quotas", wrap(GetUserQuotas))
-			r.Put("/helpflags/:id", wrap(SetHelpFlag))
+		apiRoute.Group("/user", func(userRoute RouteRegister) {
+			userRoute.Get("/", wrap(GetSignedInUser))
+			userRoute.Put("/", bind(m.UpdateUserCommand{}), wrap(UpdateSignedInUser))
+			userRoute.Post("/using/:id", wrap(UserSetUsingOrg))
+			userRoute.Get("/orgs", wrap(GetSignedInUserOrgList))
+
+			userRoute.Post("/stars/dashboard/:id", wrap(StarDashboard))
+			userRoute.Delete("/stars/dashboard/:id", wrap(UnstarDashboard))
+
+			userRoute.Put("/password", bind(m.ChangeUserPasswordCommand{}), wrap(ChangeUserPassword))
+			userRoute.Get("/quotas", wrap(GetUserQuotas))
+			userRoute.Put("/helpflags/:id", wrap(SetHelpFlag))
 			// For dev purpose
-			r.Get("/helpflags/clear", wrap(ClearHelpFlags))
+			userRoute.Get("/helpflags/clear", wrap(ClearHelpFlags))
 
 
-			r.Get("/preferences", wrap(GetUserPreferences))
-			r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
+			userRoute.Get("/preferences", wrap(GetUserPreferences))
+			userRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateUserPreferences))
 		})

 		// users (admin permission required)
-		r.Group("/users", func() {
-			r.Get("/", wrap(SearchUsers))
-			r.Get("/search", wrap(SearchUsersWithPaging))
-			r.Get("/:id", wrap(GetUserById))
-			r.Get("/:id/orgs", wrap(GetUserOrgList))
+		apiRoute.Group("/users", func(usersRoute RouteRegister) {
+			usersRoute.Get("/", wrap(SearchUsers))
+			usersRoute.Get("/search", wrap(SearchUsersWithPaging))
+			usersRoute.Get("/:id", wrap(GetUserById))
+			usersRoute.Get("/:id/orgs", wrap(GetUserOrgList))
 			// query parameters /users/lookup?loginOrEmail=admin@example.com
-			r.Get("/lookup", wrap(GetUserByLoginOrEmail))
-			r.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
-			r.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
+			usersRoute.Get("/lookup", wrap(GetUserByLoginOrEmail))
+			usersRoute.Put("/:id", bind(m.UpdateUserCommand{}), wrap(UpdateUser))
+			usersRoute.Post("/:id/using/:orgId", wrap(UpdateUserActiveOrg))
 		}, reqGrafanaAdmin)

 		// org information available to all users.
-		r.Group("/org", func() {
-			r.Get("/", wrap(GetOrgCurrent))
-			r.Get("/quotas", wrap(GetOrgQuotas))
+		apiRoute.Group("/org", func(orgRoute RouteRegister) {
+			orgRoute.Get("/", wrap(GetOrgCurrent))
+			orgRoute.Get("/quotas", wrap(GetOrgQuotas))
 		})

 		// current org
-		r.Group("/org", func() {
-			r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
-			r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
-			r.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
-			r.Get("/users", wrap(GetOrgUsersForCurrentOrg))
-			r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
-			r.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
+		apiRoute.Group("/org", func(orgRoute RouteRegister) {
+			orgRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrgCurrent))
+			orgRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddressCurrent))
+			orgRoute.Post("/users", quota("user"), bind(m.AddOrgUserCommand{}), wrap(AddOrgUserToCurrentOrg))
+			orgRoute.Get("/users", wrap(GetOrgUsersForCurrentOrg))
+			orgRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUserForCurrentOrg))
+			orgRoute.Delete("/users/:userId", wrap(RemoveOrgUserForCurrentOrg))
 
 
 			// invites
-			r.Get("/invites", wrap(GetPendingOrgInvites))
-			r.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
-			r.Patch("/invites/:code/revoke", wrap(RevokeInvite))
+			orgRoute.Get("/invites", wrap(GetPendingOrgInvites))
+			orgRoute.Post("/invites", quota("user"), bind(dtos.AddInviteForm{}), wrap(AddOrgInvite))
+			orgRoute.Patch("/invites/:code/revoke", wrap(RevokeInvite))
 
 
 			// prefs
-			r.Get("/preferences", wrap(GetOrgPreferences))
-			r.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
+			orgRoute.Get("/preferences", wrap(GetOrgPreferences))
+			orgRoute.Put("/preferences", bind(dtos.UpdatePrefsCmd{}), wrap(UpdateOrgPreferences))
 		}, reqOrgAdmin)

 		// create new org
-		r.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
+		apiRoute.Post("/orgs", quota("org"), bind(m.CreateOrgCommand{}), wrap(CreateOrg))
 
 
 		// search all orgs
-		r.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
+		apiRoute.Get("/orgs", reqGrafanaAdmin, wrap(SearchOrgs))
 
 
 		// orgs (admin routes)
-		r.Group("/orgs/:orgId", func() {
-			r.Get("/", wrap(GetOrgById))
-			r.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
-			r.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
-			r.Delete("/", wrap(DeleteOrgById))
-			r.Get("/users", wrap(GetOrgUsers))
-			r.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
-			r.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
-			r.Delete("/users/:userId", wrap(RemoveOrgUser))
-			r.Get("/quotas", wrap(GetOrgQuotas))
-			r.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
+		apiRoute.Group("/orgs/:orgId", func(orgsRoute RouteRegister) {
+			orgsRoute.Get("/", wrap(GetOrgById))
+			orgsRoute.Put("/", bind(dtos.UpdateOrgForm{}), wrap(UpdateOrg))
+			orgsRoute.Put("/address", bind(dtos.UpdateOrgAddressForm{}), wrap(UpdateOrgAddress))
+			orgsRoute.Delete("/", wrap(DeleteOrgById))
+			orgsRoute.Get("/users", wrap(GetOrgUsers))
+			orgsRoute.Post("/users", bind(m.AddOrgUserCommand{}), wrap(AddOrgUser))
+			orgsRoute.Patch("/users/:userId", bind(m.UpdateOrgUserCommand{}), wrap(UpdateOrgUser))
+			orgsRoute.Delete("/users/:userId", wrap(RemoveOrgUser))
+			orgsRoute.Get("/quotas", wrap(GetOrgQuotas))
+			orgsRoute.Put("/quotas/:target", bind(m.UpdateOrgQuotaCmd{}), wrap(UpdateOrgQuota))
 		}, reqGrafanaAdmin)

 		// orgs (admin routes)
-		r.Group("/orgs/name/:name", func() {
-			r.Get("/", wrap(GetOrgByName))
+		apiRoute.Group("/orgs/name/:name", func(orgsRoute RouteRegister) {
+			orgsRoute.Get("/", wrap(GetOrgByName))
 		}, reqGrafanaAdmin)

 		// auth api keys
-		r.Group("/auth/keys", func() {
-			r.Get("/", wrap(GetApiKeys))
-			r.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
-			r.Delete("/:id", wrap(DeleteApiKey))
+		apiRoute.Group("/auth/keys", func(keysRoute RouteRegister) {
+			keysRoute.Get("/", wrap(GetApiKeys))
+			keysRoute.Post("/", quota("api_key"), bind(m.AddApiKeyCommand{}), wrap(AddApiKey))
+			keysRoute.Delete("/:id", wrap(DeleteApiKey))
 		}, reqOrgAdmin)

 		// Preferences
-		r.Group("/preferences", func() {
-			r.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
+		apiRoute.Group("/preferences", func(prefRoute RouteRegister) {
+			prefRoute.Post("/set-home-dash", bind(m.SavePreferencesCommand{}), wrap(SetHomeDashboard))
 		})

 		// Data sources
-		r.Group("/datasources", func() {
-			r.Get("/", wrap(GetDataSources))
-			r.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
-			r.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
-			r.Delete("/:id", DeleteDataSourceById)
-			r.Delete("/name/:name", DeleteDataSourceByName)
-			r.Get("/:id", wrap(GetDataSourceById))
-			r.Get("/name/:name", wrap(GetDataSourceByName))
+		apiRoute.Group("/datasources", func(datasourceRoute RouteRegister) {
+			datasourceRoute.Get("/", wrap(GetDataSources))
+			datasourceRoute.Post("/", quota("data_source"), bind(m.AddDataSourceCommand{}), AddDataSource)
+			datasourceRoute.Put("/:id", bind(m.UpdateDataSourceCommand{}), wrap(UpdateDataSource))
+			datasourceRoute.Delete("/:id", DeleteDataSourceById)
+			datasourceRoute.Delete("/name/:name", DeleteDataSourceByName)
+			datasourceRoute.Get("/:id", wrap(GetDataSourceById))
+			datasourceRoute.Get("/name/:name", wrap(GetDataSourceByName))
 		}, reqOrgAdmin)

-		r.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
+		apiRoute.Get("/datasources/id/:name", wrap(GetDataSourceIdByName), reqSignedIn)
 
 
-		r.Get("/plugins", wrap(GetPluginList))
-		r.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
-		r.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
+		apiRoute.Get("/plugins", wrap(GetPluginList))
+		apiRoute.Get("/plugins/:pluginId/settings", wrap(GetPluginSettingById))
+		apiRoute.Get("/plugins/:pluginId/markdown/:name", wrap(GetPluginMarkdown))
 
 
-		r.Group("/plugins", func() {
-			r.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
-			r.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
+		apiRoute.Group("/plugins", func(pluginRoute RouteRegister) {
+			pluginRoute.Get("/:pluginId/dashboards/", wrap(GetPluginDashboards))
+			pluginRoute.Post("/:pluginId/settings", bind(m.UpdatePluginSettingCmd{}), wrap(UpdatePluginSetting))
 		}, reqOrgAdmin)

-		r.Get("/frontend/settings/", GetFrontendSettings)
-		r.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
-		r.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
+		apiRoute.Get("/frontend/settings/", GetFrontendSettings)
+		apiRoute.Any("/datasources/proxy/:id/*", reqSignedIn, hs.ProxyDataSourceRequest)
+		apiRoute.Any("/datasources/proxy/:id", reqSignedIn, hs.ProxyDataSourceRequest)
 
 
 		// Dashboard
-		r.Group("/dashboards", func() {
-			r.Get("/db/:slug", GetDashboard)
-			r.Delete("/db/:slug", reqEditorRole, DeleteDashboard)
+		apiRoute.Group("/dashboards", func(dashboardRoute RouteRegister) {
+			dashboardRoute.Get("/db/:slug", GetDashboard)
+			dashboardRoute.Delete("/db/:slug", reqEditorRole, DeleteDashboard)
 
 
-			r.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
-			r.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
-			r.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
+			dashboardRoute.Get("/id/:dashboardId/versions", wrap(GetDashboardVersions))
+			dashboardRoute.Get("/id/:dashboardId/versions/:id", wrap(GetDashboardVersion))
+			dashboardRoute.Post("/id/:dashboardId/restore", reqEditorRole, bind(dtos.RestoreDashboardVersionCommand{}), wrap(RestoreDashboardVersion))
 
 
-			r.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
+			dashboardRoute.Post("/calculate-diff", bind(dtos.CalculateDiffOptions{}), wrap(CalculateDashboardDiff))
 
 
-			r.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
-			r.Get("/file/:file", GetDashboardFromJsonFile)
-			r.Get("/home", wrap(GetHomeDashboard))
-			r.Get("/tags", GetDashboardTags)
-			r.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
+			dashboardRoute.Post("/db", reqEditorRole, bind(m.SaveDashboardCommand{}), wrap(PostDashboard))
+			dashboardRoute.Get("/file/:file", GetDashboardFromJsonFile)
+			dashboardRoute.Get("/home", wrap(GetHomeDashboard))
+			dashboardRoute.Get("/tags", GetDashboardTags)
+			dashboardRoute.Post("/import", bind(dtos.ImportDashboardCommand{}), wrap(ImportDashboard))
 		})

 		// Dashboard snapshots
-		r.Group("/dashboard/snapshots", func() {
-			r.Get("/", wrap(SearchDashboardSnapshots))
+		apiRoute.Group("/dashboard/snapshots", func(dashboardRoute RouteRegister) {
+			dashboardRoute.Get("/", wrap(SearchDashboardSnapshots))
 		})

 		// Playlist
-		r.Group("/playlists", func() {
-			r.Get("/", wrap(SearchPlaylists))
-			r.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
-			r.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
-			r.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
-			r.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
-			r.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
-			r.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
+		apiRoute.Group("/playlists", func(playlistRoute RouteRegister) {
+			playlistRoute.Get("/", wrap(SearchPlaylists))
+			playlistRoute.Get("/:id", ValidateOrgPlaylist, wrap(GetPlaylist))
+			playlistRoute.Get("/:id/items", ValidateOrgPlaylist, wrap(GetPlaylistItems))
+			playlistRoute.Get("/:id/dashboards", ValidateOrgPlaylist, wrap(GetPlaylistDashboards))
+			playlistRoute.Delete("/:id", reqEditorRole, ValidateOrgPlaylist, wrap(DeletePlaylist))
+			playlistRoute.Put("/:id", reqEditorRole, bind(m.UpdatePlaylistCommand{}), ValidateOrgPlaylist, wrap(UpdatePlaylist))
+			playlistRoute.Post("/", reqEditorRole, bind(m.CreatePlaylistCommand{}), wrap(CreatePlaylist))
 		})

 		// Search
-		r.Get("/search/", Search)
-
-		// metrics
-		r.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
-		r.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
-		r.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
-		r.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
+		apiRoute.Get("/search/", Search)
 
 
 		// metrics
-		r.Get("/metrics", wrap(GetInternalMetrics))
-
-		r.Group("/alerts", func() {
-			r.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
-			r.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
-			r.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
-			r.Get("/", wrap(GetAlerts))
-			r.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
+		apiRoute.Post("/tsdb/query", bind(dtos.MetricRequest{}), wrap(QueryMetrics))
+		apiRoute.Get("/tsdb/testdata/scenarios", wrap(GetTestDataScenarios))
+		apiRoute.Get("/tsdb/testdata/gensql", reqGrafanaAdmin, wrap(GenerateSqlTestData))
+		apiRoute.Get("/tsdb/testdata/random-walk", wrap(GetTestDataRandomWalk))
+
+		apiRoute.Group("/alerts", func(alertsRoute RouteRegister) {
+			alertsRoute.Post("/test", bind(dtos.AlertTestCommand{}), wrap(AlertTest))
+			alertsRoute.Post("/:alertId/pause", bind(dtos.PauseAlertCommand{}), wrap(PauseAlert), reqEditorRole)
+			alertsRoute.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert))
+			alertsRoute.Get("/", wrap(GetAlerts))
+			alertsRoute.Get("/states-for-dashboard", wrap(GetAlertStatesForDashboard))
 		})

-		r.Get("/alert-notifications", wrap(GetAlertNotifications))
-		r.Get("/alert-notifiers", wrap(GetAlertNotifiers))
+		apiRoute.Get("/alert-notifications", wrap(GetAlertNotifications))
+		apiRoute.Get("/alert-notifiers", wrap(GetAlertNotifiers))
 
 
-		r.Group("/alert-notifications", func() {
-			r.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
-			r.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
-			r.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
-			r.Get("/:notificationId", wrap(GetAlertNotificationById))
-			r.Delete("/:notificationId", wrap(DeleteAlertNotification))
+		apiRoute.Group("/alert-notifications", func(alertNotifications RouteRegister) {
+			alertNotifications.Post("/test", bind(dtos.NotificationTestCommand{}), wrap(NotificationTest))
+			alertNotifications.Post("/", bind(m.CreateAlertNotificationCommand{}), wrap(CreateAlertNotification))
+			alertNotifications.Put("/:notificationId", bind(m.UpdateAlertNotificationCommand{}), wrap(UpdateAlertNotification))
+			alertNotifications.Get("/:notificationId", wrap(GetAlertNotificationById))
+			alertNotifications.Delete("/:notificationId", wrap(DeleteAlertNotification))
 		}, reqEditorRole)

-		r.Get("/annotations", wrap(GetAnnotations))
-		r.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
+		apiRoute.Get("/annotations", wrap(GetAnnotations))
+		apiRoute.Post("/annotations/mass-delete", reqOrgAdmin, bind(dtos.DeleteAnnotationsCmd{}), wrap(DeleteAnnotations))
 
 
-		r.Group("/annotations", func() {
-			r.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
+		apiRoute.Group("/annotations", func(annotationsRoute RouteRegister) {
+			annotationsRoute.Post("/", bind(dtos.PostAnnotationsCmd{}), wrap(PostAnnotation))
 		}, reqEditorRole)

 		// error test
@@ -298,16 +297,16 @@ func (hs *HttpServer) registerRoutes() {
 	}, reqSignedIn)

 	// admin api
-	r.Group("/api/admin", func() {
-		r.Get("/settings", AdminGetSettings)
-		r.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
-		r.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
-		r.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
-		r.Delete("/users/:id", AdminDeleteUser)
-		r.Get("/users/:id/quotas", wrap(GetUserQuotas))
-		r.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
-		r.Get("/stats", AdminGetStats)
-		r.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
+	r.Group("/api/admin", func(adminRoute RouteRegister) {
+		adminRoute.Get("/settings", AdminGetSettings)
+		adminRoute.Post("/users", bind(dtos.AdminCreateUserForm{}), AdminCreateUser)
+		adminRoute.Put("/users/:id/password", bind(dtos.AdminUpdateUserPasswordForm{}), AdminUpdateUserPassword)
+		adminRoute.Put("/users/:id/permissions", bind(dtos.AdminUpdateUserPermissionsForm{}), AdminUpdateUserPermissions)
+		adminRoute.Delete("/users/:id", AdminDeleteUser)
+		adminRoute.Get("/users/:id/quotas", wrap(GetUserQuotas))
+		adminRoute.Put("/users/:id/quotas/:target", bind(m.UpdateUserQuotaCmd{}), wrap(UpdateUserQuota))
+		adminRoute.Get("/stats", AdminGetStats)
+		adminRoute.Post("/pause-all-alerts", bind(dtos.PauseAllAlertsCommand{}), wrap(PauseAllAlerts))
 	}, reqGrafanaAdmin)

 	// rendering
@@ -326,7 +325,9 @@ func (hs *HttpServer) registerRoutes() {
 	// streams
 	//r.Post("/api/streams/push", reqSignedIn, bind(dtos.StreamMessage{}), liveConn.PushToStream)

-	InitAppPluginRoutes(r)
+	r.Register(macaronR)
+
+	InitAppPluginRoutes(macaronR)
 
 
-	r.NotFound(NotFoundHandler)
+	macaronR.NotFound(NotFoundHandler)
 }

+ 2 - 2
pkg/api/cloudwatch/cloudwatch.go

@@ -266,7 +266,7 @@ func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
 		c.JsonApiErr(500, "Unable to call AWS API", err)
 		c.JsonApiErr(500, "Unable to call AWS API", err)
 		return
 	}
+	metrics.M_Aws_CloudWatch_GetMetricStatistics.Inc()
 
 
 	c.JSON(200, resp)
 	c.JSON(200, resp)
 }
 	var resp cloudwatch.ListMetricsOutput
 	var resp cloudwatch.ListMetricsOutput
 	err = svc.ListMetricsPages(params,
 		func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
+			metrics.M_Aws_CloudWatch_ListMetrics.Inc()
 			metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
 			metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
 			for _, metric := range metrics {
 				resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))
+ 1 - 1
pkg/api/cloudwatch/metrics.go

@@ -275,7 +275,7 @@ func getAllMetrics(cwData *datasourceInfo) (cloudwatch.ListMetricsOutput, error)
 	var resp cloudwatch.ListMetricsOutput
 	err = svc.ListMetricsPages(params,
 		func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
-			metrics.M_Aws_CloudWatch_ListMetrics.Inc(1)
+			metrics.M_Aws_CloudWatch_ListMetrics.Inc()
 			metrics, _ := awsutil.ValuesAtPath(page, "Metrics")
 			for _, metric := range metrics {
 				resp.Metrics = append(resp.Metrics, metric.(*cloudwatch.Metric))

+ 3 - 3
pkg/api/dashboard_snapshot.go

@@ -34,13 +34,13 @@ func CreateDashboardSnapshot(c *middleware.Context, cmd m.CreateDashboardSnapsho
 
 
 		cmd.OrgId = -1
 		cmd.UserId = -1
-		metrics.M_Api_Dashboard_Snapshot_External.Inc(1)
+		metrics.M_Api_Dashboard_Snapshot_External.Inc()
 	} else {
 		cmd.Key = util.GetRandomString(32)
 		cmd.DeleteKey = util.GetRandomString(32)
 		cmd.OrgId = c.OrgId
 		cmd.UserId = c.UserId
-		metrics.M_Api_Dashboard_Snapshot_Create.Inc(1)
+		metrics.M_Api_Dashboard_Snapshot_Create.Inc()
 	}

 	if err := bus.Dispatch(&cmd); err != nil {
@@ -84,7 +84,7 @@ func GetDashboardSnapshot(c *middleware.Context) {
 		},
 	}

-	metrics.M_Api_Dashboard_Snapshot_Get.Inc(1)
+	metrics.M_Api_Dashboard_Snapshot_Get.Inc()
 
 
 	c.Resp.Header().Set("Cache-Control", "public, max-age=3600")
 	c.JSON(200, dto)

+ 11 - 1
pkg/api/http_server.go

@@ -11,6 +11,8 @@ import (
 	"path"
 	"time"

+	"github.com/prometheus/client_golang/prometheus/promhttp"
+
 	gocache "github.com/patrickmn/go-cache"
 	macaron "gopkg.in/macaron.v1"

@@ -165,9 +167,9 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
 	}))

 	m.Use(hs.healthHandler)
+	m.Use(hs.metricsEndpoint)
 	m.Use(middleware.GetContextHandler())
 	m.Use(middleware.Sessioner(&setting.SessionOptions))
-	m.Use(middleware.RequestMetrics())
 	m.Use(middleware.OrgRedirect())

 	// needs to be after context handler
@@ -180,6 +182,14 @@ func (hs *HttpServer) newMacaron() *macaron.Macaron {
 	return m
 }

+func (hs *HttpServer) metricsEndpoint(ctx *macaron.Context) {
+	if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/metrics" {
+		return
+	}
+
+	promhttp.Handler().ServeHTTP(ctx.Resp, ctx.Req.Request)
+}
+
 func (hs *HttpServer) healthHandler(ctx *macaron.Context) {
 	if ctx.Req.Method != "GET" || ctx.Req.URL.Path != "/api/health" {
 		return

+ 1 - 1
pkg/api/login.go

@@ -127,7 +127,7 @@ func LoginPost(c *middleware.Context, cmd dtos.LoginCommand) Response {
 		c.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")
 	}

-	metrics.M_Api_Login_Post.Inc(1)
+	metrics.M_Api_Login_Post.Inc()
 
 
 	return Json(200, result)
 }

+ 1 - 1
pkg/api/login_oauth.go

@@ -186,7 +186,7 @@ func OAuthLogin(ctx *middleware.Context) {
 	// login
 	loginUserWithUser(userQuery.Result, ctx)

-	metrics.M_Api_Login_OAuth.Inc(1)
+	metrics.M_Api_Login_OAuth.Inc()
 
 
 	if redirectTo, _ := url.QueryUnescape(ctx.GetCookie("redirect_to")); len(redirectTo) > 0 {
 		ctx.SetCookie("redirect_to", "", -1, setting.AppSubUrl+"/")

+ 0 - 55
pkg/api/metrics.go

@@ -2,13 +2,10 @@ package api
 
 
 import (
 	"context"
-	"encoding/json"
-	"net/http"

 	"github.com/grafana/grafana/pkg/api/dtos"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
-	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/middleware"
 	"github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/tsdb"
@@ -79,58 +76,6 @@ func GetTestDataScenarios(c *middleware.Context) Response {
 	return Json(200, &result)
 }

-func GetInternalMetrics(c *middleware.Context) Response {
-	if metrics.UseNilMetrics {
-		return Json(200, util.DynMap{"message": "Metrics disabled"})
-	}
-
-	snapshots := metrics.MetricStats.GetSnapshots()
-
-	resp := make(map[string]interface{})
-
-	for _, m := range snapshots {
-		metricName := m.Name() + m.StringifyTags()
-
-		switch metric := m.(type) {
-		case metrics.Gauge:
-			resp[metricName] = map[string]interface{}{
-				"value": metric.Value(),
-			}
-		case metrics.Counter:
-			resp[metricName] = map[string]interface{}{
-				"count": metric.Count(),
-			}
-		case metrics.Timer:
-			percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
-			resp[metricName] = map[string]interface{}{
-				"count": metric.Count(),
-				"min":   metric.Min(),
-				"max":   metric.Max(),
-				"mean":  metric.Mean(),
-				"std":   metric.StdDev(),
-				"p25":   percentiles[0],
-				"p75":   percentiles[1],
-				"p90":   percentiles[2],
-				"p99":   percentiles[3],
-			}
-		}
-	}
-
-	var b []byte
-	var err error
-	if b, err = json.MarshalIndent(resp, "", " "); err != nil {
-		return ApiError(500, "body json marshal", err)
-	}
-
-	return &NormalResponse{
-		body:   b,
-		status: 200,
-		header: http.Header{
-			"Content-Type": []string{"application/json"},
-		},
-	}
-}
-
 // Genereates a index out of range error
 func GenerateError(c *middleware.Context) Response {
 	var array []string

+ 1 - 1
pkg/api/org.go

@@ -89,7 +89,7 @@ func CreateOrg(c *middleware.Context, cmd m.CreateOrgCommand) Response {
 		return ApiError(500, "Failed to create organization", err)
 	}

-	metrics.M_Api_Org_Create.Inc(1)
+	metrics.M_Api_Org_Create.Inc()
 
 
 	return Json(200, &util.DynMap{
 		"orgId":   cmd.Result.Id,

+ 2 - 2
pkg/api/org_invite.go

@@ -187,8 +187,8 @@ func CompleteInvite(c *middleware.Context, completeInvite dtos.CompleteInviteFor
 
 
 	loginUserWithUser(user, c)

-	metrics.M_Api_User_SignUpCompleted.Inc(1)
-	metrics.M_Api_User_SignUpInvite.Inc(1)
+	metrics.M_Api_User_SignUpCompleted.Inc()
+	metrics.M_Api_User_SignUpInvite.Inc()
 
 
 	return ApiSuccess("User created and logged in")
 }

+ 115 - 0
pkg/api/route_register.go

@@ -0,0 +1,115 @@
+package api
+
+import (
+	"net/http"
+
+	macaron "gopkg.in/macaron.v1"
+)
+
+type Router interface {
+	Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route
+}
+
+type RouteRegister interface {
+	Get(string, ...macaron.Handler)
+	Post(string, ...macaron.Handler)
+	Delete(string, ...macaron.Handler)
+	Put(string, ...macaron.Handler)
+	Patch(string, ...macaron.Handler)
+	Any(string, ...macaron.Handler)
+
+	Group(string, func(RouteRegister), ...macaron.Handler)
+
+	Register(Router) *macaron.Router
+}
+
+type RegisterNamedMiddleware func(name string) macaron.Handler
+
+func newRouteRegister(namedMiddleware ...RegisterNamedMiddleware) RouteRegister {
+	return &routeRegister{
+		prefix:          "",
+		routes:          []route{},
+		subfixHandlers:  []macaron.Handler{},
+		namedMiddleware: namedMiddleware,
+	}
+}
+
+type route struct {
+	method   string
+	pattern  string
+	handlers []macaron.Handler
+}
+
+type routeRegister struct {
+	prefix          string
+	subfixHandlers  []macaron.Handler
+	namedMiddleware []RegisterNamedMiddleware
+	routes          []route
+	groups          []*routeRegister
+}
+
+func (rr *routeRegister) Group(pattern string, fn func(rr RouteRegister), handlers ...macaron.Handler) {
+	group := &routeRegister{
+		prefix:          rr.prefix + pattern,
+		subfixHandlers:  append(rr.subfixHandlers, handlers...),
+		routes:          []route{},
+		namedMiddleware: rr.namedMiddleware,
+	}
+
+	fn(group)
+	rr.groups = append(rr.groups, group)
+}
+
+func (rr *routeRegister) Register(router Router) *macaron.Router {
+	for _, r := range rr.routes {
+		router.Handle(r.method, r.pattern, r.handlers)
+	}
+
+	for _, g := range rr.groups {
+		g.Register(router)
+	}
+
+	return &macaron.Router{}
+}
+
+func (rr *routeRegister) route(pattern, method string, handlers ...macaron.Handler) {
+	//inject tracing
+
+	h := make([]macaron.Handler, 0)
+	for _, fn := range rr.namedMiddleware {
+		h = append(h, fn(pattern))
+	}
+
+	h = append(h, rr.subfixHandlers...)
+	h = append(h, handlers...)
+
+	rr.routes = append(rr.routes, route{
+		method:   method,
+		pattern:  rr.prefix + pattern,
+		handlers: h,
+	})
+}
+
+func (rr *routeRegister) Get(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, http.MethodGet, handlers...)
+}
+
+func (rr *routeRegister) Post(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, http.MethodPost, handlers...)
+}
+
+func (rr *routeRegister) Delete(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, http.MethodDelete, handlers...)
+}
+
+func (rr *routeRegister) Put(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, http.MethodPut, handlers...)
+}
+
+func (rr *routeRegister) Patch(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, http.MethodPatch, handlers...)
+}
+
+func (rr *routeRegister) Any(pattern string, handlers ...macaron.Handler) {
+	rr.route(pattern, "*", handlers...)
+}

+ 185 - 0
pkg/api/route_register_test.go

@@ -0,0 +1,185 @@
+package api
+
+import (
+	"strconv"
+	"testing"
+
+	macaron "gopkg.in/macaron.v1"
+)
+
+type fakeRouter struct {
+	route []route
+}
+
+func (fr *fakeRouter) Handle(method, pattern string, handlers []macaron.Handler) *macaron.Route {
+	fr.route = append(fr.route, route{
+		pattern:  pattern,
+		method:   method,
+		handlers: handlers,
+	})
+
+	return &macaron.Route{}
+}
+
+func emptyHandlers(n int) []macaron.Handler {
+	res := []macaron.Handler{}
+	for i := 1; n >= i; i++ {
+		res = append(res, emptyHandler(strconv.Itoa(i)))
+	}
+	return res
+}
+
+func emptyHandler(name string) macaron.Handler {
+	return struct{ name string }{name: name}
+}
+
+func TestRouteSimpleRegister(t *testing.T) {
+	testTable := []route{
+		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
+		{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
+	}
+
+	// Setup
+	rr := newRouteRegister(func(name string) macaron.Handler {
+		return emptyHandler(name)
+	})
+
+	rr.Delete("/admin", emptyHandler("1"))
+	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
+
+	fr := &fakeRouter{}
+	rr.Register(fr)
+
+	// Validation
+	if len(fr.route) != len(testTable) {
+		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
+	}
+
+	for i := range testTable {
+		if testTable[i].method != fr.route[i].method {
+			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
+		}
+
+		if testTable[i].pattern != fr.route[i].pattern {
+			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
+		}
+
+		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
+			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
+				len(testTable[i].handlers),
+				len(fr.route[i].handlers),
+				testTable[i],
+				fr.route[i])
+		}
+	}
+}
+
+func TestRouteGroupedRegister(t *testing.T) {
+	testTable := []route{
+		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(1)},
+		{method: "GET", pattern: "/down", handlers: emptyHandlers(2)},
+		{method: "POST", pattern: "/user", handlers: emptyHandlers(1)},
+		{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(1)},
+		{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(2)},
+		{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(4)},
+	}
+
+	// Setup
+	rr := newRouteRegister()
+
+	rr.Delete("/admin", emptyHandler("1"))
+	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
+
+	rr.Group("/user", func(user RouteRegister) {
+		user.Post("", emptyHandler("1"))
+		user.Put("/friends", emptyHandler("2"))
+
+		user.Group("/admin", func(admin RouteRegister) {
+			admin.Delete("", emptyHandler("3"))
+			admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
+
+		}, emptyHandler("3"))
+	})
+
+	fr := &fakeRouter{}
+	rr.Register(fr)
+
+	// Validation
+	if len(fr.route) != len(testTable) {
+		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
+	}
+
+	for i := range testTable {
+		if testTable[i].method != fr.route[i].method {
+			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
+		}
+
+		if testTable[i].pattern != fr.route[i].pattern {
+			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
+		}
+
+		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
+			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
+				len(testTable[i].handlers),
+				len(fr.route[i].handlers),
+				testTable[i],
+				fr.route[i])
+		}
+	}
+}
+
+func TestNamedMiddlewareRouteRegister(t *testing.T) {
+	testTable := []route{
+		{method: "DELETE", pattern: "/admin", handlers: emptyHandlers(2)},
+		{method: "GET", pattern: "/down", handlers: emptyHandlers(3)},
+		{method: "POST", pattern: "/user", handlers: emptyHandlers(2)},
+		{method: "PUT", pattern: "/user/friends", handlers: emptyHandlers(2)},
+		{method: "DELETE", pattern: "/user/admin", handlers: emptyHandlers(3)},
+		{method: "GET", pattern: "/user/admin/all", handlers: emptyHandlers(5)},
+	}
+
+	// Setup
+	rr := newRouteRegister(func(name string) macaron.Handler {
+		return emptyHandler(name)
+	})
+
+	rr.Delete("/admin", emptyHandler("1"))
+	rr.Get("/down", emptyHandler("1"), emptyHandler("2"))
+
+	rr.Group("/user", func(user RouteRegister) {
+		user.Post("", emptyHandler("1"))
+		user.Put("/friends", emptyHandler("2"))
+
+		user.Group("/admin", func(admin RouteRegister) {
+			admin.Delete("", emptyHandler("3"))
+			admin.Get("/all", emptyHandler("3"), emptyHandler("4"), emptyHandler("5"))
+
+		}, emptyHandler("3"))
+	})
+
+	fr := &fakeRouter{}
+	rr.Register(fr)
+
+	// Validation
+	if len(fr.route) != len(testTable) {
+		t.Errorf("want %v routes, got %v", len(testTable), len(fr.route))
+	}
+
+	for i := range testTable {
+		if testTable[i].method != fr.route[i].method {
+			t.Errorf("want %s got %v", testTable[i].method, fr.route[i].method)
+		}
+
+		if testTable[i].pattern != fr.route[i].pattern {
+			t.Errorf("want %s got %v", testTable[i].pattern, fr.route[i].pattern)
+		}
+
+		if len(testTable[i].handlers) != len(fr.route[i].handlers) {
+			t.Errorf("want %d handlers got %d handlers \ntestcase: %v\nroute: %v\n",
+				len(testTable[i].handlers),
+				len(fr.route[i].handlers),
+				testTable[i],
+				fr.route[i])
+		}
+	}
+}

+ 2 - 2
pkg/api/signup.go

@@ -47,7 +47,7 @@ func SignUp(c *middleware.Context, form dtos.SignUpForm) Response {
 		Code:  cmd.Code,
 	})

-	metrics.M_Api_User_SignUpStarted.Inc(1)
+	metrics.M_Api_User_SignUpStarted.Inc()
 
 
 	return Json(200, util.DynMap{"status": "SignUpCreated"})
 }
@@ -111,7 +111,7 @@ func SignUpStep2(c *middleware.Context, form dtos.SignUpStep2Form) Response {
 	}

 	loginUserWithUser(user, c)
-	metrics.M_Api_User_SignUpCompleted.Inc(1)
+	metrics.M_Api_User_SignUpCompleted.Inc()
 
 
 	return Json(200, apiResponse)
 }

+ 1 - 1
pkg/cmd/grafana-server/server.go

@@ -54,7 +54,7 @@ func (g *GrafanaServerImpl) Start() {
 	g.writePIDFile()

 	initSql()
-	metrics.Init()
+	metrics.Init(setting.Cfg)
 	search.Init()
 	login.Init()
 	social.NewOAuthService()

+ 88 - 0
pkg/components/imguploader/gcsuploader.go

@@ -0,0 +1,88 @@
+package imguploader
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/util"
+	"golang.org/x/oauth2/google"
+)
+
+const (
+	tokenUrl  string = "https://www.googleapis.com/auth/devstorage.read_write"
+	uploadUrl string = "https://www.googleapis.com/upload/storage/v1/b/%s/o?uploadType=media&name=%s&predefinedAcl=publicRead"
+)
+
+type GCSUploader struct {
+	keyFile string
+	bucket  string
+	log     log.Logger
+}
+
+func NewGCSUploader(keyFile, bucket string) *GCSUploader {
+	return &GCSUploader{
+		keyFile: keyFile,
+		bucket:  bucket,
+		log:     log.New("gcsuploader"),
+	}
+}
+
+func (u *GCSUploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
+	key := util.GetRandomString(20) + ".png"
+
+	u.log.Debug("Opening key file ", u.keyFile)
+	data, err := ioutil.ReadFile(u.keyFile)
+	if err != nil {
+		return "", err
+	}
+
+	u.log.Debug("Creating JWT conf")
+	conf, err := google.JWTConfigFromJSON(data, tokenUrl)
+	if err != nil {
+		return "", err
+	}
+
+	u.log.Debug("Creating HTTP client")
+	client := conf.Client(ctx)
+	err = u.uploadFile(client, imageDiskPath, key)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("https://storage.googleapis.com/%s/%s", u.bucket, key), nil
+}
+
+func (u *GCSUploader) uploadFile(client *http.Client, imageDiskPath, key string) error {
+	u.log.Debug("Opening image file ", imageDiskPath)
+
+	fileReader, err := os.Open(imageDiskPath)
+	if err != nil {
+		return err
+	}
+
+	reqUrl := fmt.Sprintf(uploadUrl, u.bucket, key)
+	u.log.Debug("Request URL: ", reqUrl)
+
+	req, err := http.NewRequest("POST", reqUrl, fileReader)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Add("Content-Type", "image/png")
+	u.log.Debug("Sending POST request to GCS")
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return err
+	}
+
+	if resp.StatusCode != 200 {
+		return fmt.Errorf("GCS response status code %d", resp.StatusCode)
+	}
+
+	return nil
+}
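
A hedged usage sketch of the uploader on its own; the key file path, bucket name and image path are placeholders, and in Grafana the uploader is normally built by NewImageUploader() from the external_image_storage.gcs settings:

package imguploader

import (
	"context"
	"fmt"
)

func exampleGCSUpload() error {
	// Placeholder credentials and bucket.
	uploader := NewGCSUploader("/etc/secrets/service-account.json", "my-screenshot-bucket")

	publicUrl, err := uploader.Upload(context.Background(), "/tmp/dashboard.png")
	if err != nil {
		return err
	}

	fmt.Println("image available at:", publicUrl)
	return nil
}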

+ 24 - 0
pkg/components/imguploader/gcsuploader_test.go

@@ -0,0 +1,24 @@
+package imguploader
+
+import (
+	"context"
+	"testing"
+
+	"github.com/grafana/grafana/pkg/setting"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestUploadToGCS(t *testing.T) {
+	SkipConvey("[Integration test] for external_image_store.gcs", t, func() {
+		setting.NewConfigContext(&setting.CommandLineArgs{
+			HomePath: "../../../",
+		})
+
+		gcsUploader, _ := NewImageUploader()
+
+		path, err := gcsUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
+
+		So(err, ShouldBeNil)
+		So(path, ShouldNotEqual, "")
+	})
+}

+ 13 - 2
pkg/components/imguploader/imguploader.go

@@ -1,6 +1,7 @@
 package imguploader
 
 import (
+	"context"
 	"fmt"
 	"regexp"
 
@@ -8,13 +9,13 @@ import (
 )
 
 type ImageUploader interface {
-	Upload(path string) (string, error)
+	Upload(ctx context.Context, path string) (string, error)
 }
 
 type NopImageUploader struct {
 }
 
-func (NopImageUploader) Upload(path string) (string, error) {
+func (NopImageUploader) Upload(ctx context.Context, path string) (string, error) {
 	return "", nil
 }
 
@@ -52,6 +53,16 @@ func NewImageUploader() (ImageUploader, error) {
 		password := webdavSec.Key("password").String()
 
 		return NewWebdavImageUploader(url, username, password, public_url)
+	case "gcs":
+		gcssec, err := setting.Cfg.GetSection("external_image_storage.gcs")
+		if err != nil {
+			return nil, err
+		}
+
+		keyFile := gcssec.Key("key_file").MustString("")
+		bucketName := gcssec.Key("bucket").MustString("")
+
+		return NewGCSUploader(keyFile, bucketName), nil
 	}
 
 	return NopImageUploader{}, nil
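
A hedged caller-side sketch of the context-aware interface; renderedPath is a placeholder, and the concrete backend (S3, WebDAV, GCS or the no-op fallback) is still picked from the external_image_storage settings as above:

package imguploader

import "context"

func exampleUploadRenderedImage(renderedPath string) (string, error) {
	uploader, err := NewImageUploader()
	if err != nil {
		return "", err
	}

	// Upload now threads a context through to the storage backend
	// (the GCS uploader hands it to its OAuth2-enabled HTTP client).
	return uploader.Upload(context.Background(), renderedPath)
}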

+ 23 - 0
pkg/components/imguploader/imguploader_test.go

@@ -96,5 +96,28 @@ func TestImageUploaderFactory(t *testing.T) {
 			So(original.username, ShouldEqual, "username")
 			So(original.password, ShouldEqual, "password")
 		})
+
+		Convey("GCS uploader", func() {
+			var err error
+
+			setting.NewConfigContext(&setting.CommandLineArgs{
+				HomePath: "../../../",
+			})
+
+			setting.ImageUploadProvider = "gcs"
+
+			gcpSec, err := setting.Cfg.GetSection("external_image_storage.gcs")
+			gcpSec.NewKey("key_file", "/etc/secrets/project-79a52befa3f6.json")
+			gcpSec.NewKey("bucket", "project-grafana-east")
+
+			uploader, err := NewImageUploader()
+
+			So(err, ShouldBeNil)
+			original, ok := uploader.(*GCSUploader)
+
+			So(ok, ShouldBeTrue)
+			So(original.keyFile, ShouldEqual, "/etc/secrets/project-79a52befa3f6.json")
+			So(original.bucket, ShouldEqual, "project-grafana-east")
+		})
 	})
 }

+ 2 - 1
pkg/components/imguploader/s3uploader.go

@@ -1,6 +1,7 @@
 package imguploader
 
 import (
+	"context"
 	"os"
 	"time"
 
@@ -34,7 +35,7 @@ func NewS3Uploader(region, bucket, acl, accessKey, secretKey string) *S3Uploader
 	}
 }
 
-func (u *S3Uploader) Upload(imageDiskPath string) (string, error) {
+func (u *S3Uploader) Upload(ctx context.Context, imageDiskPath string) (string, error) {
 	sess, err := session.NewSession()
 	if err != nil {
 		return "", err

+ 2 - 1
pkg/components/imguploader/s3uploader_test.go

@@ -1,6 +1,7 @@
 package imguploader
 
 import (
+	"context"
 	"testing"
 
 	"github.com/grafana/grafana/pkg/setting"
@@ -15,7 +16,7 @@ func TestUploadToS3(t *testing.T) {
 
 		s3Uploader, _ := NewImageUploader()
 
-		path, err := s3Uploader.Upload("../../../public/img/logo_transparent_400x.png")
+		path, err := s3Uploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
 
 		So(err, ShouldBeNil)
 		So(path, ShouldNotEqual, "")

+ 2 - 1
pkg/components/imguploader/webdavuploader.go

@@ -2,6 +2,7 @@ package imguploader
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -33,7 +34,7 @@ var netClient = &http.Client{
 	Transport: netTransport,
 }
 
-func (u *WebdavUploader) Upload(pa string) (string, error) {
+func (u *WebdavUploader) Upload(ctx context.Context, pa string) (string, error) {
 	url, _ := url.Parse(u.url)
 	filename := util.GetRandomString(20) + ".png"
 	url.Path = path.Join(url.Path, filename)

+ 3 - 2
pkg/components/imguploader/webdavuploader_test.go

@@ -1,6 +1,7 @@
 package imguploader
 
 import (
+	"context"
 	"testing"
 
 	. "github.com/smartystreets/goconvey/convey"
@@ -11,7 +12,7 @@ func TestUploadToWebdav(t *testing.T) {
 	// Can be tested with this docker container: https://hub.docker.com/r/morrisjobke/webdav/
 	SkipConvey("[Integration test] for external_image_store.webdav", t, func() {
 		webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "")
-		path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
+		path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
 
 		So(err, ShouldBeNil)
 		So(path, ShouldStartWith, "http://localhost:8888/webdav/")
@@ -19,7 +20,7 @@
 
 	SkipConvey("[Integration test] for external_image_store.webdav with public url", t, func() {
 		webdavUploader, _ := NewWebdavImageUploader("http://localhost:8888/webdav/", "test", "test", "http://publicurl:8888/webdav")
-		path, err := webdavUploader.Upload("../../../public/img/logo_transparent_400x.png")
+		path, err := webdavUploader.Upload(context.Background(), "../../../public/img/logo_transparent_400x.png")
 
 		So(err, ShouldBeNil)
 		So(path, ShouldStartWith, "http://publicurl:8888/webdav/")

+ 0 - 122
pkg/metrics/EMWA.go

@@ -1,122 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import (
-	"math"
-	"sync"
-	"sync/atomic"
-)
-
-// EWMAs continuously calculate an exponentially-weighted moving average
-// based on an outside source of clock ticks.
-type EWMA interface {
-	Rate() float64
-	Snapshot() EWMA
-	Tick()
-	Update(int64)
-}
-
-// NewEWMA constructs a new EWMA with the given alpha.
-func NewEWMA(alpha float64) EWMA {
-	if UseNilMetrics {
-		return NilEWMA{}
-	}
-	return &StandardEWMA{alpha: alpha}
-}
-
-// NewEWMA1 constructs a new EWMA for a one-minute moving average.
-func NewEWMA1() EWMA {
-	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
-}
-
-// NewEWMA5 constructs a new EWMA for a five-minute moving average.
-func NewEWMA5() EWMA {
-	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
-}
-
-// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
-func NewEWMA15() EWMA {
-	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
-}
-
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
-
-// Rate returns the rate of events per second at the time the snapshot was
-// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
-	panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
-	panic("Update called on an EWMASnapshot")
-}
-
-// NilEWMA is a no-op EWMA.
-type NilEWMA struct{}
-
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
-
-// StandardEWMA is the standard implementation of an EWMA and tracks the number
-// of uncounted events and processes them on each tick.  It uses the
-// sync/atomic package to manage uncounted events.
-type StandardEWMA struct {
-	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
-	alpha     float64
-	rate      float64
-	init      bool
-	mutex     sync.Mutex
-}
-
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
-	a.mutex.Lock()
-	defer a.mutex.Unlock()
-	return a.rate * float64(1e9)
-}
-
-// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
-	return EWMASnapshot(a.Rate())
-}
-
-// Tick ticks the clock to update the moving average.  It assumes it is called
-// every five seconds.
-func (a *StandardEWMA) Tick() {
-	count := atomic.LoadInt64(&a.uncounted)
-	atomic.AddInt64(&a.uncounted, -count)
-	instantRate := float64(count) / float64(5e9)
-	a.mutex.Lock()
-	defer a.mutex.Unlock()
-	if a.init {
-		a.rate += a.alpha * (instantRate - a.rate)
-	} else {
-		a.init = true
-		a.rate = instantRate
-	}
-}
-
-// Update adds n uncounted events.
-func (a *StandardEWMA) Update(n int64) {
-	atomic.AddInt64(&a.uncounted, n)
-}

+ 0 - 46
pkg/metrics/combos.go

@@ -1,46 +0,0 @@
-package metrics
-
-// type comboCounterRef struct {
-// 	*MetricMeta
-// 	usageCounter  Counter
-// 	metricCounter Counter
-// }
-//
-// func RegComboCounter(name string, tagStrings ...string) Counter {
-// 	meta := NewMetricMeta(name, tagStrings)
-// 	cr := &comboCounterRef{
-// 		MetricMeta:    meta,
-// 		usageCounter:  NewCounter(meta),
-// 		metricCounter: NewCounter(meta),
-// 	}
-//
-// 	UsageStats.Register(cr.usageCounter)
-// 	MetricStats.Register(cr.metricCounter)
-//
-// 	return cr
-// }
-//
-// func (c comboCounterRef) Clear() {
-// 	c.usageCounter.Clear()
-// 	c.metricCounter.Clear()
-// }
-//
-// func (c comboCounterRef) Count() int64 {
-// 	panic("Count called on a combocounter ref")
-// }
-//
-// // Dec panics.
-// func (c comboCounterRef) Dec(i int64) {
-// 	c.usageCounter.Dec(i)
-// 	c.metricCounter.Dec(i)
-// }
-//
-// // Inc panics.
-// func (c comboCounterRef) Inc(i int64) {
-// 	c.usageCounter.Inc(i)
-// 	c.metricCounter.Inc(i)
-// }
-//
-// func (c comboCounterRef) Snapshot() Metric {
-// 	return c.metricCounter.Snapshot()
-// }

+ 0 - 61
pkg/metrics/common.go

@@ -1,61 +0,0 @@
-package metrics
-
-import "github.com/grafana/grafana/pkg/log"
-
-type MetricMeta struct {
-	tags map[string]string
-	name string
-}
-
-func NewMetricMeta(name string, tagStrings []string) *MetricMeta {
-	if len(tagStrings)%2 != 0 {
-		log.Fatal(3, "Metrics: tags array is missing value for key, %v", tagStrings)
-	}
-
-	tags := make(map[string]string)
-	for i := 0; i < len(tagStrings); i += 2 {
-		tags[tagStrings[i]] = tagStrings[i+1]
-	}
-
-	return &MetricMeta{
-		tags: tags,
-		name: name,
-	}
-}
-
-func (m *MetricMeta) Name() string {
-	return m.name
-}
-
-func (m *MetricMeta) GetTagsCopy() map[string]string {
-	if len(m.tags) == 0 {
-		return make(map[string]string)
-	}
-
-	copy := make(map[string]string)
-	for k2, v2 := range m.tags {
-		copy[k2] = v2
-	}
-
-	return copy
-}
-
-func (m *MetricMeta) StringifyTags() string {
-	if len(m.tags) == 0 {
-		return ""
-	}
-
-	str := ""
-	for key, value := range m.tags {
-		str += "." + key + "_" + value
-	}
-
-	return str
-}
-
-type Metric interface {
-	Name() string
-	GetTagsCopy() map[string]string
-	StringifyTags() string
-	Snapshot() Metric
-}

+ 0 - 61
pkg/metrics/counter.go

@@ -1,61 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Counters hold an int64 value that can be incremented and decremented.
-type Counter interface {
-	Metric
-
-	Clear()
-	Count() int64
-	Dec(int64)
-	Inc(int64)
-}
-
-// NewCounter constructs a new StandardCounter.
-func NewCounter(meta *MetricMeta) Counter {
-	return &StandardCounter{
-		MetricMeta: meta,
-		count:      0,
-	}
-}
-
-func RegCounter(name string, tagStrings ...string) Counter {
-	cr := NewCounter(NewMetricMeta(name, tagStrings))
-	MetricStats.Register(cr)
-	return cr
-}
-
-// StandardCounter is the standard implementation of a Counter and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
-	count int64 //Due to a bug in golang the 64bit variable need to come first to be 64bit aligned. https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-	*MetricMeta
-}
-
-// Clear sets the counter to zero.
-func (c *StandardCounter) Clear() {
-	atomic.StoreInt64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
-	return atomic.LoadInt64(&c.count)
-}
-
-// Dec decrements the counter by the given amount.
-func (c *StandardCounter) Dec(i int64) {
-	atomic.AddInt64(&c.count, -i)
-}
-
-// Inc increments the counter by the given amount.
-func (c *StandardCounter) Inc(i int64) {
-	atomic.AddInt64(&c.count, i)
-}
-
-func (c *StandardCounter) Snapshot() Metric {
-	return &StandardCounter{
-		MetricMeta: c.MetricMeta,
-		count:      c.count,
-	}
-}

+ 0 - 11
pkg/metrics/delta.go

@@ -1,11 +0,0 @@
-package metrics
-
-import "math"
-
-func calculateDelta(oldValue, newValue int64) int64 {
-	if oldValue < newValue {
-		return newValue - oldValue
-	} else {
-		return (math.MaxInt64 - oldValue) + (newValue - math.MinInt64) + 1
-	}
-}

+ 0 - 83
pkg/metrics/gauge.go

@@ -1,83 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import "sync/atomic"
-
-// Gauges hold an int64 value that can be set arbitrarily.
-type Gauge interface {
-	Metric
-
-	Update(int64)
-	Value() int64
-}
-
-func NewGauge(meta *MetricMeta) Gauge {
-	if UseNilMetrics {
-		return NilGauge{}
-	}
-	return &StandardGauge{
-		MetricMeta: meta,
-		value:      0,
-	}
-}
-
-func RegGauge(name string, tagStrings ...string) Gauge {
-	tr := NewGauge(NewMetricMeta(name, tagStrings))
-	MetricStats.Register(tr)
-	return tr
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot struct {
-	value int64
-	*MetricMeta
-}
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Metric { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
-	panic("Update called on a GaugeSnapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return g.value }
-
-// NilGauge is a no-op Gauge.
-type NilGauge struct{ *MetricMeta }
-
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Metric { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
-
-// StandardGauge is the standard implementation of a Gauge and uses the
-// sync/atomic package to manage a single int64 value.
-// atomic needs 64-bit aligned memory which is ensure for first word
-type StandardGauge struct {
-	value int64
-	*MetricMeta
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Metric {
-	return GaugeSnapshot{MetricMeta: g.MetricMeta, value: g.value}
-}
-
-// Update updates the gauge's value.
-func (g *StandardGauge) Update(v int64) {
-	atomic.StoreInt64(&g.value, v)
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
-	return atomic.LoadInt64(&g.value)
-}

+ 0 - 107
pkg/metrics/graphite.go

@@ -1,107 +0,0 @@
-package metrics
-
-import (
-	"bytes"
-	"fmt"
-	"net"
-	"strings"
-	"time"
-
-	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/setting"
-)
-
-type GraphitePublisher struct {
-	address    string
-	protocol   string
-	prefix     string
-	prevCounts map[string]int64
-}
-
-func CreateGraphitePublisher() (*GraphitePublisher, error) {
-	graphiteSection, err := setting.Cfg.GetSection("metrics.graphite")
-	if err != nil {
-		return nil, nil
-	}
-
-	address := graphiteSection.Key("address").String()
-	if address == "" {
-		return nil, nil
-	}
-
-	publisher := &GraphitePublisher{}
-	publisher.prevCounts = make(map[string]int64)
-	publisher.protocol = "tcp"
-	publisher.prefix = graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s")
-	publisher.address = address
-
-	safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
-	prefix := graphiteSection.Key("prefix").Value()
-
-	if prefix == "" {
-		prefix = "prod.grafana.%(instance_name)s."
-	}
-
-	publisher.prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
-	return publisher, nil
-}
-
-func (this *GraphitePublisher) Publish(metrics []Metric) {
-	conn, err := net.DialTimeout(this.protocol, this.address, time.Second*5)
-
-	if err != nil {
-		log.Error(3, "Metrics: GraphitePublisher:  Failed to connect to %s!", err)
-		return
-	}
-
-	buf := bytes.NewBufferString("")
-	now := time.Now().Unix()
-
-	for _, m := range metrics {
-		metricName := this.prefix + m.Name() + m.StringifyTags()
-
-		switch metric := m.(type) {
-		case Counter:
-			this.addCount(buf, metricName+".count", metric.Count(), now)
-		case Gauge:
-			this.addCount(buf, metricName, metric.Value(), now)
-		case Timer:
-			percentiles := metric.Percentiles([]float64{0.25, 0.75, 0.90, 0.99})
-			this.addCount(buf, metricName+".count", metric.Count(), now)
-			this.addInt(buf, metricName+".max", metric.Max(), now)
-			this.addInt(buf, metricName+".min", metric.Min(), now)
-			this.addFloat(buf, metricName+".mean", metric.Mean(), now)
-			this.addFloat(buf, metricName+".std", metric.StdDev(), now)
-			this.addFloat(buf, metricName+".p25", percentiles[0], now)
-			this.addFloat(buf, metricName+".p75", percentiles[1], now)
-			this.addFloat(buf, metricName+".p90", percentiles[2], now)
-			this.addFloat(buf, metricName+".p99", percentiles[3], now)
-		}
-	}
-
-	log.Trace("Metrics: GraphitePublisher.Publish() \n%s", buf)
-	_, err = conn.Write(buf.Bytes())
-
-	if err != nil {
-		log.Error(3, "Metrics: GraphitePublisher: Failed to send metrics! %s", err)
-	}
-}
-
-func (this *GraphitePublisher) addInt(buf *bytes.Buffer, metric string, value int64, now int64) {
-	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, value, now))
-}
-
-func (this *GraphitePublisher) addFloat(buf *bytes.Buffer, metric string, value float64, now int64) {
-	buf.WriteString(fmt.Sprintf("%s %f %d\n", metric, value, now))
-}
-
-func (this *GraphitePublisher) addCount(buf *bytes.Buffer, metric string, value int64, now int64) {
-	delta := value
-
-	if last, ok := this.prevCounts[metric]; ok {
-		delta = calculateDelta(last, value)
-	}
-
-	this.prevCounts[metric] = value
-	buf.WriteString(fmt.Sprintf("%s %d %d\n", metric, delta, now))
-}

+ 0 - 77
pkg/metrics/graphite_test.go

@@ -1,77 +0,0 @@
-package metrics
-
-import (
-	"testing"
-
-	"github.com/grafana/grafana/pkg/setting"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func TestGraphitePublisher(t *testing.T) {
-
-	setting.CustomInitPath = "conf/does_not_exist.ini"
-
-	Convey("Test graphite prefix replacement", t, func() {
-		var err error
-		err = setting.NewConfigContext(&setting.CommandLineArgs{
-			HomePath: "../../",
-		})
-
-		So(err, ShouldBeNil)
-
-		sec, err := setting.Cfg.NewSection("metrics.graphite")
-		sec.NewKey("prefix", "prod.grafana.%(instance_name)s.")
-		sec.NewKey("address", "localhost:2001")
-
-		So(err, ShouldBeNil)
-
-		setting.InstanceName = "hostname.with.dots.com"
-		publisher, err := CreateGraphitePublisher()
-
-		So(err, ShouldBeNil)
-		So(publisher, ShouldNotBeNil)
-
-		So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
-		So(publisher.address, ShouldEqual, "localhost:2001")
-	})
-
-	Convey("Test graphite publisher default prefix", t, func() {
-		var err error
-		err = setting.NewConfigContext(&setting.CommandLineArgs{
-			HomePath: "../../",
-		})
-
-		So(err, ShouldBeNil)
-
-		sec, err := setting.Cfg.NewSection("metrics.graphite")
-		sec.NewKey("address", "localhost:2001")
-
-		So(err, ShouldBeNil)
-
-		setting.InstanceName = "hostname.with.dots.com"
-		publisher, err := CreateGraphitePublisher()
-
-		So(err, ShouldBeNil)
-		So(publisher, ShouldNotBeNil)
-
-		So(publisher.prefix, ShouldEqual, "prod.grafana.hostname_with_dots_com.")
-		So(publisher.address, ShouldEqual, "localhost:2001")
-	})
-
-	Convey("Test graphite publisher default values", t, func() {
-		var err error
-		err = setting.NewConfigContext(&setting.CommandLineArgs{
-			HomePath: "../../",
-		})
-
-		So(err, ShouldBeNil)
-
-		_, err = setting.Cfg.NewSection("metrics.graphite")
-
-		publisher, err := CreateGraphitePublisher()
-
-		So(err, ShouldBeNil)
-		So(publisher, ShouldBeNil)
-	})
-}

+ 396 - 0
pkg/metrics/graphitebridge/graphite.go

@@ -0,0 +1,396 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package graphitebridge provides a bridge to push Prometheus metrics to a Graphite
+// server.
+package graphitebridge
+
+import (
+	"bufio"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+	"golang.org/x/net/context"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	defaultInterval       = 15 * time.Second
+	millisecondsPerSecond = 1000
+)
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+	// Ignore errors and try to push as many metrics to Graphite as possible.
+	ContinueOnError HandlerErrorHandling = iota
+
+	// Abort the push to Graphite upon the first error encountered.
+	AbortOnError
+)
+
+var metricCategoryPrefix []string = []string{"proxy_", "api_", "page_", "alerting_", "aws_", "db_", "stat_", "go_", "process_"}
+var trimMetricPrefix []string = []string{"grafana_"}
+
+// Config defines the Graphite bridge config.
+type Config struct {
+	// The url to push data to. Required.
+	URL string
+
+	// The prefix for the pushed Graphite metrics. Defaults to empty string.
+	Prefix string
+
+	// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
+	Interval time.Duration
+
+	// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
+	Timeout time.Duration
+
+	// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
+	Gatherer prometheus.Gatherer
+
+	// The logger that messages are written to. Defaults to no logging.
+	Logger Logger
+
+	// ErrorHandling defines how errors are handled. Note that errors are
+	// logged regardless of the configured ErrorHandling provided Logger
+	// is not nil.
+	ErrorHandling HandlerErrorHandling
+
+	// Graphite does not support ever-increasing counters the way
+	// Prometheus does, and rollups and ingestion may not cope with
+	// them. When enabled, this option makes the bridge send deltas
+	// instead: the last pushed counter value is kept in memory and
+	// subtracted from the newly collected value before sending.
+	CountersAsDelta bool
+}
+
+// Bridge pushes metrics to the configured Graphite server.
+type Bridge struct {
+	url              string
+	prefix           string
+	countersAsDetlas bool
+	interval         time.Duration
+	timeout          time.Duration
+
+	errorHandling HandlerErrorHandling
+	logger        Logger
+
+	g prometheus.Gatherer
+
+	lastValue map[model.Fingerprint]float64
+}
+
+// Logger is the minimal interface Bridge needs for logging. Note that
+// log.Logger from the standard library implements this interface, and it is
+// easy to implement by custom loggers, if they don't do so already anyway.
+type Logger interface {
+	Println(v ...interface{})
+}
+
+// NewBridge returns a pointer to a new Bridge struct.
+func NewBridge(c *Config) (*Bridge, error) {
+	b := &Bridge{}
+
+	if c.URL == "" {
+		return nil, errors.New("missing URL")
+	}
+	b.url = c.URL
+
+	if c.Gatherer == nil {
+		b.g = prometheus.DefaultGatherer
+	} else {
+		b.g = c.Gatherer
+	}
+
+	if c.Logger != nil {
+		b.logger = c.Logger
+	}
+
+	if c.Prefix != "" {
+		b.prefix = c.Prefix
+	}
+
+	var z time.Duration
+	if c.Interval == z {
+		b.interval = defaultInterval
+	} else {
+		b.interval = c.Interval
+	}
+
+	if c.Timeout == z {
+		b.timeout = defaultInterval
+	} else {
+		b.timeout = c.Timeout
+	}
+
+	b.errorHandling = c.ErrorHandling
+	b.lastValue = map[model.Fingerprint]float64{}
+	b.countersAsDetlas = c.CountersAsDelta
+
+	return b, nil
+}
+
+// Run starts the event loop that pushes Prometheus metrics to Graphite at the
+// configured interval.
+func (b *Bridge) Run(ctx context.Context) {
+	ticker := time.NewTicker(b.interval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			if err := b.Push(); err != nil && b.logger != nil {
+				b.logger.Println("error pushing to Graphite:", err)
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// Push pushes Prometheus metrics to the configured Graphite server.
+func (b *Bridge) Push() error {
+	mfs, err := b.g.Gather()
+	if err != nil || len(mfs) == 0 {
+		switch b.errorHandling {
+		case AbortOnError:
+			return err
+		case ContinueOnError:
+			if b.logger != nil {
+				b.logger.Println("continue on error:", err)
+			}
+		default:
+			panic("unrecognized error handling value")
+		}
+	}
+
+	conn, err := net.DialTimeout("tcp", b.url, b.timeout)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	return b.writeMetrics(conn, mfs, b.prefix, model.Now())
+}
+
+func (b *Bridge) writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
+	for _, mf := range mfs {
+		vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
+			Timestamp: now,
+		}, mf)
+		if err != nil {
+			return err
+		}
+
+		buf := bufio.NewWriter(w)
+		for _, s := range vec {
+			if err := writePrefix(buf, prefix); err != nil {
+				return err
+			}
+
+			if err := writeMetric(buf, s.Metric, mf); err != nil {
+				return err
+			}
+
+			value := b.replaceCounterWithDelta(mf, s.Metric, s.Value)
+			if _, err := fmt.Fprintf(buf, " %g %d\n", value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
+				return err
+			}
+			if err := buf.Flush(); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func writeMetric(buf *bufio.Writer, m model.Metric, mf *dto.MetricFamily) error {
+	metricName, hasName := m[model.MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	for _, v := range metricCategoryPrefix {
+		if strings.HasPrefix(string(metricName), v) {
+			group := strings.Replace(v, "_", " ", 1)
+			metricName = model.LabelValue(strings.Replace(string(metricName), v, group, 1))
+		}
+	}
+
+	for _, v := range trimMetricPrefix {
+		if strings.HasPrefix(string(metricName), v) {
+			metricName = model.LabelValue(strings.Replace(string(metricName), v, "", 1))
+		}
+	}
+
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != model.MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
+		}
+	}
+
+	var err error
+	switch numLabels {
+	case 0:
+		if hasName {
+			if err := writeSanitized(buf, string(metricName)); err != nil {
+				return err
+			}
+		}
+	default:
+		sort.Strings(labelStrings)
+		if err = writeSanitized(buf, string(metricName)); err != nil {
+			return err
+		}
+		for _, s := range labelStrings {
+			if err = buf.WriteByte('.'); err != nil {
+				return err
+			}
+			if err = writeSanitized(buf, s); err != nil {
+				return err
+			}
+		}
+	}
+
+	if err = addExtentionConventionForRollups(buf, mf, m); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func addExtentionConventionForRollups(buf *bufio.Writer, mf *dto.MetricFamily, m model.Metric) error {
+	// Adding `.count` `.sum` suffix makes it possible to configure
+	// different rollup strategies based on metric type
+
+	mfType := mf.GetType()
+	var err error
+	if mfType == dto.MetricType_COUNTER {
+		if _, err = fmt.Fprint(buf, ".count"); err != nil {
+			return err
+		}
+	}
+
+	if mfType == dto.MetricType_SUMMARY || mfType == dto.MetricType_HISTOGRAM {
+		if strings.HasSuffix(string(m[model.MetricNameLabel]), "_count") {
+			if _, err = fmt.Fprint(buf, ".count"); err != nil {
+				return err
+			}
+		}
+	}
+	if mfType == dto.MetricType_HISTOGRAM {
+		if strings.HasSuffix(string(m[model.MetricNameLabel]), "_sum") {
+			if _, err = fmt.Fprint(buf, ".sum"); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func writePrefix(buf *bufio.Writer, s string) error {
+	for _, c := range s {
+		if _, err := buf.WriteRune(replaceInvalid(c)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func writeSanitized(buf *bufio.Writer, s string) error {
+	prevUnderscore := false
+
+	for _, c := range s {
+		c = replaceInvalidRune(c)
+		if c == '_' {
+			if prevUnderscore {
+				continue
+			}
+			prevUnderscore = true
+		} else {
+			prevUnderscore = false
+		}
+		if _, err := buf.WriteRune(c); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func replaceInvalid(c rune) rune {
+	if c == ' ' || c == '.' {
+		return '.'
+	}
+	return replaceInvalidRune(c)
+}
+
+func replaceInvalidRune(c rune) rune {
+	if c == ' ' {
+		return '.'
+	}
+	if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
+		return '_'
+	}
+	return c
+}
+
+func (b *Bridge) replaceCounterWithDelta(mf *dto.MetricFamily, metric model.Metric, value model.SampleValue) float64 {
+	if !b.countersAsDetlas {
+		return float64(value)
+	}
+
+	mfType := mf.GetType()
+	if mfType == dto.MetricType_COUNTER {
+		return b.returnDelta(metric, value)
+	}
+
+	if mfType == dto.MetricType_SUMMARY {
+		if strings.HasSuffix(string(metric[model.MetricNameLabel]), "_count") {
+			return b.returnDelta(metric, value)
+		}
+	}
+
+	return float64(value)
+}
+
+func (b *Bridge) returnDelta(metric model.Metric, value model.SampleValue) float64 {
+	key := metric.Fingerprint()
+	_, exists := b.lastValue[key]
+	if !exists {
+		b.lastValue[key] = 0
+	}
+
+	delta := float64(value) - b.lastValue[key]
+	b.lastValue[key] = float64(value)
+
+	return delta
+}
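
A hedged sketch of driving the bridge directly; the Graphite address and prefix are placeholders, and in Grafana the Config is built from the metrics settings (see pkg/metrics/init.go further down). With CountersAsDelta enabled, a counter gathered at 10 and then 15 is written as 10 and then 5.

package main

import (
	"log"
	"os"
	"time"

	"github.com/grafana/grafana/pkg/metrics/graphitebridge"
	"github.com/prometheus/client_golang/prometheus"
	"golang.org/x/net/context"
)

func main() {
	bridge, err := graphitebridge.NewBridge(&graphitebridge.Config{
		URL:             "localhost:2003", // placeholder Graphite address
		Prefix:          "prod.grafana.example.",
		Interval:        10 * time.Second,
		Timeout:         5 * time.Second,
		Gatherer:        prometheus.DefaultGatherer,
		Logger:          log.New(os.Stdout, "graphitebridge: ", log.LstdFlags),
		ErrorHandling:   graphitebridge.ContinueOnError,
		CountersAsDelta: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Run blocks and pushes on every tick until the context is cancelled;
	// Grafana starts it in its own goroutine (see init.go below).
	bridge.Run(context.Background())
}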

+ 503 - 0
pkg/metrics/graphitebridge/graphite_test.go

@@ -0,0 +1,503 @@
+package graphitebridge
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"net"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+)
+
+func TestCountersAsDelta(t *testing.T) {
+	b, _ := NewBridge(&Config{
+		URL:             "localhost:12345",
+		CountersAsDelta: true,
+	})
+	ty := dto.MetricType(0)
+	mf := &dto.MetricFamily{
+		Type:   &ty,
+		Metric: []*dto.Metric{},
+	}
+	m := model.Metric{}
+
+	var want float64
+	var got float64
+	want = float64(1)
+	got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
+	if got != want {
+		t.Fatalf("want %v got %v", want, got)
+	}
+
+	got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
+	if got != want {
+		t.Fatalf("want %v got %v", want, got)
+	}
+}
+
+func TestCountersAsDeltaDisabled(t *testing.T) {
+	b, _ := NewBridge(&Config{
+		URL:             "localhost:12345",
+		CountersAsDelta: false,
+	})
+	ty := dto.MetricType(0)
+	mf := &dto.MetricFamily{
+		Type:   &ty,
+		Metric: []*dto.Metric{},
+	}
+	m := model.Metric{}
+
+	var want float64
+	var got float64
+	want = float64(1)
+	got = b.replaceCounterWithDelta(mf, m, model.SampleValue(1))
+	if got != want {
+		t.Fatalf("want %v got %v", want, got)
+	}
+
+	want = float64(2)
+	got = b.replaceCounterWithDelta(mf, m, model.SampleValue(2))
+	if got != want {
+		t.Fatalf("want %v got %v", want, got)
+	}
+}
+
+func TestSanitize(t *testing.T) {
+	testCases := []struct {
+		in, out string
+	}{
+		{in: "hello", out: "hello"},
+		{in: "hE/l1o", out: "hE_l1o"},
+		{in: "he,*ll(.o", out: "he_ll_o"},
+		{in: "hello_there%^&", out: "hello_there_"},
+	}
+
+	var buf bytes.Buffer
+	w := bufio.NewWriter(&buf)
+
+	for i, tc := range testCases {
+		if err := writeSanitized(w, tc.in); err != nil {
+			t.Fatalf("write failed: %v", err)
+		}
+		if err := w.Flush(); err != nil {
+			t.Fatalf("flush failed: %v", err)
+		}
+
+		if want, got := tc.out, buf.String(); want != got {
+			t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
+		}
+
+		buf.Reset()
+	}
+}
+
+func TestSanitizePrefix(t *testing.T) {
+	testCases := []struct {
+		in, out string
+	}{
+		{in: "service.prod.", out: "service.prod."},
+		{in: "service.prod", out: "service.prod"},
+	}
+
+	var buf bytes.Buffer
+	w := bufio.NewWriter(&buf)
+
+	for i, tc := range testCases {
+		if err := writePrefix(w, tc.in); err != nil {
+			t.Fatalf("write failed: %v", err)
+		}
+		if err := w.Flush(); err != nil {
+			t.Fatalf("flush failed: %v", err)
+		}
+
+		if want, got := tc.out, buf.String(); want != got {
+			t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
+		}
+
+		buf.Reset()
+	}
+}
+
+func TestWriteSummary(t *testing.T) {
+	sumVec := prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:        "name",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+		},
+		[]string{"labelname"},
+	)
+
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(sumVec)
+
+	b, err := NewBridge(&Config{
+		URL:             "localhost:8080",
+		Gatherer:        reg,
+		CountersAsDelta: true,
+	})
+	if err != nil {
+		t.Fatalf("cannot create bridge. err: %v", err)
+	}
+
+	sumVec.WithLabelValues("val1").Observe(float64(10))
+	sumVec.WithLabelValues("val1").Observe(float64(20))
+	sumVec.WithLabelValues("val1").Observe(float64(30))
+	sumVec.WithLabelValues("val2").Observe(float64(20))
+	sumVec.WithLabelValues("val2").Observe(float64(30))
+	sumVec.WithLabelValues("val2").Observe(float64(40))
+
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	now := model.Time(1477043083)
+	var buf bytes.Buffer
+	err = b.writeMetrics(&buf, mfs, "prefix.", now)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want := `prefix.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
+prefix.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
+prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
+prefix.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
+prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
+`
+
+	if got := buf.String(); want != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+	}
+}
+
+func TestWriteHistogram(t *testing.T) {
+	histVec := prometheus.NewHistogramVec(
+		prometheus.HistogramOpts{
+			Name:        "name",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+			Buckets:     []float64{0.01, 0.02, 0.05, 0.1},
+		},
+		[]string{"labelname"},
+	)
+
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(histVec)
+
+	b, err := NewBridge(&Config{
+		URL:             "localhost:8080",
+		Gatherer:        reg,
+		CountersAsDelta: true,
+	})
+	if err != nil {
+		t.Fatalf("error creating bridge: %v", err)
+	}
+
+	histVec.WithLabelValues("val1").Observe(float64(10))
+	histVec.WithLabelValues("val1").Observe(float64(20))
+	histVec.WithLabelValues("val1").Observe(float64(30))
+	histVec.WithLabelValues("val2").Observe(float64(20))
+	histVec.WithLabelValues("val2").Observe(float64(30))
+	histVec.WithLabelValues("val2").Observe(float64(40))
+
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	now := model.Time(1477043083)
+	var buf bytes.Buffer
+	err = b.writeMetrics(&buf, mfs, "prefix.", now)
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val1.sum 60 1477043
+prefix.name_count.constname.constvalue.labelname.val1.count 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
+prefix.name_sum.constname.constvalue.labelname.val2.sum 90 1477043
+prefix.name_count.constname.constvalue.labelname.val2.count 3 1477043
+prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
+`
+	if got := buf.String(); want != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+	}
+}
+
+func TestCounterVec(t *testing.T) {
+	cntVec := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name:        "page_response",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+		},
+		[]string{"labelname"},
+	)
+
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(cntVec)
+
+	cntVec.WithLabelValues("val1").Inc()
+	cntVec.WithLabelValues("val2").Inc()
+
+	b, err := NewBridge(&Config{
+		URL:             "localhost:8080",
+		Gatherer:        reg,
+		CountersAsDelta: true,
+	})
+	if err != nil {
+		t.Fatalf("error creating bridge: %v", err)
+	}
+
+	// first collect
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	var buf bytes.Buffer
+	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477043
+prefix.page.response.constname.constvalue.labelname.val2.count 1 1477043
+`
+	if got := buf.String(); want != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+	}
+
+	//next collect
+	cntVec.WithLabelValues("val1").Inc()
+	cntVec.WithLabelValues("val2").Inc()
+
+	mfs, err = reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	buf = bytes.Buffer{}
+	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want2 := `prefix.page.response.constname.constvalue.labelname.val1.count 1 1477053
+prefix.page.response.constname.constvalue.labelname.val2.count 1 1477053
+`
+	if got := buf.String(); want2 != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
+	}
+}
+
+func TestCounter(t *testing.T) {
+	cntVec := prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name:        "page_response",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+		})
+
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(cntVec)
+
+	cntVec.Inc()
+
+	b, err := NewBridge(&Config{
+		URL:             "localhost:8080",
+		Gatherer:        reg,
+		CountersAsDelta: true,
+	})
+	if err != nil {
+		t.Fatalf("error creating bridge: %v", err)
+	}
+
+	// first collect
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	var buf bytes.Buffer
+	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want := "prefix.page.response.constname.constvalue.count 1 1477043\n"
+	if got := buf.String(); want != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+	}
+
+	//next collect
+	cntVec.Inc()
+
+	mfs, err = reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	buf = bytes.Buffer{}
+	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477053083))
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want2 := "prefix.page.response.constname.constvalue.count 1 1477053\n"
+	if got := buf.String(); want2 != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want2, got)
+	}
+}
+
+func TestTrimGrafanaNamespace(t *testing.T) {
+	cntVec := prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name:        "grafana_http_request_total",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+		})
+
+	reg := prometheus.NewRegistry()
+	reg.MustRegister(cntVec)
+
+	cntVec.Inc()
+
+	b, err := NewBridge(&Config{
+		URL:             "localhost:8080",
+		Gatherer:        reg,
+		CountersAsDelta: true,
+	})
+	if err != nil {
+		t.Fatalf("error creating bridge: %v", err)
+	}
+
+	// first collect
+	mfs, err := reg.Gather()
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	var buf bytes.Buffer
+	err = b.writeMetrics(&buf, mfs, "prefix.", model.Time(1477043083))
+	if err != nil {
+		t.Fatalf("error: %v", err)
+	}
+
+	want := "prefix.http_request_total.constname.constvalue.count 1 1477043\n"
+	if got := buf.String(); want != got {
+		t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
+	}
+}
+
+func TestPush(t *testing.T) {
+	reg := prometheus.NewRegistry()
+	cntVec := prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name:        "name",
+			Help:        "docstring",
+			ConstLabels: prometheus.Labels{"constname": "constvalue"},
+		},
+		[]string{"labelname"},
+	)
+	cntVec.WithLabelValues("val1").Inc()
+	cntVec.WithLabelValues("val2").Inc()
+	reg.MustRegister(cntVec)
+
+	host := "localhost"
+	port := ":56789"
+	b, err := NewBridge(&Config{
+		URL:      host + port,
+		Gatherer: reg,
+		Prefix:   "prefix.",
+	})
+	if err != nil {
+		t.Fatalf("error creating bridge: %v", err)
+	}
+
+	nmg, err := newMockGraphite(port)
+	if err != nil {
+		t.Fatalf("error creating mock graphite: %v", err)
+	}
+	defer nmg.Close()
+
+	err = b.Push()
+	if err != nil {
+		t.Fatalf("error pushing: %v", err)
+	}
+
+	wants := []string{
+		"prefix.name.constname.constvalue.labelname.val1.count 1",
+		"prefix.name.constname.constvalue.labelname.val2.count 1",
+	}
+
+	select {
+	case got := <-nmg.readc:
+		for _, want := range wants {
+			matched, err := regexp.MatchString(want, got)
+			if err != nil {
+				t.Fatalf("error pushing: %v", err)
+			}
+			if !matched {
+				t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
+			}
+		}
+		return
+	case err := <-nmg.errc:
+		t.Fatalf("error reading push: %v", err)
+	case <-time.After(50 * time.Millisecond):
+		t.Fatalf("no result from graphite server")
+	}
+}
+
+func newMockGraphite(port string) (*mockGraphite, error) {
+	readc := make(chan string)
+	errc := make(chan error)
+	ln, err := net.Listen("tcp", port)
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		conn, err := ln.Accept()
+		if err != nil {
+			errc <- err
+		}
+		var b bytes.Buffer
+		io.Copy(&b, conn)
+		readc <- b.String()
+	}()
+
+	return &mockGraphite{
+		readc:    readc,
+		errc:     errc,
+		Listener: ln,
+	}, nil
+}
+
+type mockGraphite struct {
+	readc chan string
+	errc  chan error
+
+	net.Listener
+}

+ 0 - 189
pkg/metrics/histogram.go

@@ -1,189 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-// Histograms calculate distribution statistics from a series of int64 values.
-type Histogram interface {
-	Metric
-
-	Clear()
-	Count() int64
-	Max() int64
-	Mean() float64
-	Min() int64
-	Percentile(float64) float64
-	Percentiles([]float64) []float64
-	StdDev() float64
-	Sum() int64
-	Update(int64)
-	Variance() float64
-}
-
-func NewHistogram(meta *MetricMeta, s Sample) Histogram {
-	return &StandardHistogram{
-		MetricMeta: meta,
-		sample:     s,
-	}
-}
-
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
-	*MetricMeta
-	sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
-	panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
-	return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
-	return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Metric { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
-	panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
-// NilHistogram is a no-op Histogram.
-type NilHistogram struct {
-	*MetricMeta
-}
-
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
-	return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (n NilHistogram) Snapshot() Metric { return n }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
-
-// StandardHistogram is the standard implementation of a Histogram and uses a
-// Sample to bound its memory use.
-type StandardHistogram struct {
-	*MetricMeta
-	sample Sample
-}
-
-// Clear clears the histogram and its sample.
-func (h *StandardHistogram) Clear() { h.sample.Clear() }
-
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
-	return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
-	return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
-// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Metric {
-	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
-// Update samples a new value.
-func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }

+ 0 - 90
pkg/metrics/histogram_test.go

@@ -1,90 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import "testing"
-
-func BenchmarkHistogram(b *testing.B) {
-	h := NewHistogram(nil, NewUniformSample(100))
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		h.Update(int64(i))
-	}
-}
-
-func TestHistogram10000(t *testing.T) {
-	h := NewHistogram(nil, NewUniformSample(100000))
-	for i := 1; i <= 10000; i++ {
-		h.Update(int64(i))
-	}
-	testHistogram10000(t, h)
-}
-
-func TestHistogramEmpty(t *testing.T) {
-	h := NewHistogram(nil, NewUniformSample(100))
-	if count := h.Count(); 0 != count {
-		t.Errorf("h.Count(): 0 != %v\n", count)
-	}
-	if min := h.Min(); 0 != min {
-		t.Errorf("h.Min(): 0 != %v\n", min)
-	}
-	if max := h.Max(); 0 != max {
-		t.Errorf("h.Max(): 0 != %v\n", max)
-	}
-	if mean := h.Mean(); 0.0 != mean {
-		t.Errorf("h.Mean(): 0.0 != %v\n", mean)
-	}
-	if stdDev := h.StdDev(); 0.0 != stdDev {
-		t.Errorf("h.StdDev(): 0.0 != %v\n", stdDev)
-	}
-	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 0.0 != ps[0] {
-		t.Errorf("median: 0.0 != %v\n", ps[0])
-	}
-	if 0.0 != ps[1] {
-		t.Errorf("75th percentile: 0.0 != %v\n", ps[1])
-	}
-	if 0.0 != ps[2] {
-		t.Errorf("99th percentile: 0.0 != %v\n", ps[2])
-	}
-}
-
-func TestHistogramSnapshot(t *testing.T) {
-	h := NewHistogram(nil, NewUniformSample(100000))
-	for i := 1; i <= 10000; i++ {
-		h.Update(int64(i))
-	}
-	snapshot := h.Snapshot().(Histogram)
-	h.Update(0)
-	testHistogram10000(t, snapshot)
-}
-
-func testHistogram10000(t *testing.T, h Histogram) {
-	if count := h.Count(); 10000 != count {
-		t.Errorf("h.Count(): 10000 != %v\n", count)
-	}
-	if min := h.Min(); 1 != min {
-		t.Errorf("h.Min(): 1 != %v\n", min)
-	}
-	if max := h.Max(); 10000 != max {
-		t.Errorf("h.Max(): 10000 != %v\n", max)
-	}
-	if mean := h.Mean(); 5000.5 != mean {
-		t.Errorf("h.Mean(): 5000.5 != %v\n", mean)
-	}
-	if stdDev := h.StdDev(); 2886.751331514372 != stdDev {
-		t.Errorf("h.StdDev(): 2886.751331514372 != %v\n", stdDev)
-	}
-	ps := h.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 5000.5 != ps[0] {
-		t.Errorf("median: 5000.5 != %v\n", ps[0])
-	}
-	if 7500.75 != ps[1] {
-		t.Errorf("75th percentile: 7500.75 != %v\n", ps[1])
-	}
-	if 9900.99 != ps[2] {
-		t.Errorf("99th percentile: 9900.99 != %v\n", ps[2])
-	}
-}

+ 38 - 0
pkg/metrics/init.go

@@ -0,0 +1,38 @@
+package metrics
+
+import (
+	"context"
+
+	ini "gopkg.in/ini.v1"
+
+	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/metrics/graphitebridge"
+)
+
+var metricsLogger log.Logger = log.New("metrics")
+
+type logWrapper struct {
+	logger log.Logger
+}
+
+func (lw *logWrapper) Println(v ...interface{}) {
+	lw.logger.Info("graphite metric bridge", v...)
+}
+
+func Init(file *ini.File) {
+	cfg := ReadSettings(file)
+	internalInit(cfg)
+}
+
+func internalInit(settings *MetricSettings) {
+	initMetricVars(settings)
+
+	if settings.GraphiteBridgeConfig != nil {
+		bridge, err := graphitebridge.NewBridge(settings.GraphiteBridgeConfig)
+		if err != nil {
+			metricsLogger.Error("failed to create graphite bridge", "error", err)
+		} else {
+			go bridge.Run(context.Background())
+		}
+	}
+}

+ 0 - 221
pkg/metrics/meter.go

@@ -1,221 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import (
-	"sync"
-	"time"
-)
-
-// Meters count events to produce exponentially-weighted moving average rates
-// at one-, five-, and fifteen-minutes and a mean rate.
-type Meter interface {
-	Metric
-
-	Count() int64
-	Mark(int64)
-	Rate1() float64
-	Rate5() float64
-	Rate15() float64
-	RateMean() float64
-}
-
-// NewMeter constructs a new StandardMeter and launches a goroutine.
-func NewMeter(meta *MetricMeta) Meter {
-	if UseNilMetrics {
-		return NilMeter{}
-	}
-
-	m := newStandardMeter(meta)
-	arbiter.Lock()
-	defer arbiter.Unlock()
-	arbiter.meters = append(arbiter.meters, m)
-	if !arbiter.started {
-		arbiter.started = true
-		go arbiter.tick()
-	}
-	return m
-}
-
-type MeterSnapshot struct {
-	*MetricMeta
-	count                          int64
-	rate1, rate5, rate15, rateMean float64
-}
-
-// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
-	panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Metric { return m }
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{ *MetricMeta }
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Metric { return NilMeter{} }
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
-	*MetricMeta
-	lock        sync.RWMutex
-	snapshot    *MeterSnapshot
-	a1, a5, a15 EWMA
-	startTime   time.Time
-}
-
-func newStandardMeter(meta *MetricMeta) *StandardMeter {
-	return &StandardMeter{
-		MetricMeta: meta,
-		snapshot:   &MeterSnapshot{MetricMeta: meta},
-		a1:         NewEWMA1(),
-		a5:         NewEWMA5(),
-		a15:        NewEWMA15(),
-		startTime:  time.Now(),
-	}
-}
-
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
-	m.lock.RLock()
-	count := m.snapshot.count
-	m.lock.RUnlock()
-	return count
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-	m.snapshot.count += n
-	m.a1.Update(n)
-	m.a5.Update(n)
-	m.a15.Update(n)
-	m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
-	m.lock.RLock()
-	rate1 := m.snapshot.rate1
-	m.lock.RUnlock()
-	return rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
-	m.lock.RLock()
-	rate5 := m.snapshot.rate5
-	m.lock.RUnlock()
-	return rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
-	m.lock.RLock()
-	rate15 := m.snapshot.rate15
-	m.lock.RUnlock()
-	return rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
-	m.lock.RLock()
-	rateMean := m.snapshot.rateMean
-	m.lock.RUnlock()
-	return rateMean
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Metric {
-	m.lock.RLock()
-	snapshot := *m.snapshot
-	m.lock.RUnlock()
-	return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
-	// should run with write lock held on m.lock
-	snapshot := m.snapshot
-	snapshot.rate1 = m.a1.Rate()
-	snapshot.rate5 = m.a5.Rate()
-	snapshot.rate15 = m.a15.Rate()
-	snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) tick() {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-	m.a1.Tick()
-	m.a5.Tick()
-	m.a15.Tick()
-	m.updateSnapshot()
-}
-
-type meterArbiter struct {
-	sync.RWMutex
-	started bool
-	meters  []*StandardMeter
-	ticker  *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
-
-// Ticks meters on the scheduled interval
-func (ma *meterArbiter) tick() {
-	for {
-		select {
-		case <-ma.ticker.C:
-			ma.tickMeters()
-		}
-	}
-}
-
-func (ma *meterArbiter) tickMeters() {
-	ma.RLock()
-	defer ma.RUnlock()
-	for _, meter := range ma.meters {
-		meter.tick()
-	}
-}

+ 383 - 131
pkg/metrics/metrics.go

@@ -1,146 +1,398 @@
 package metrics
 
-var MetricStats Registry
-var UseNilMetrics bool
+import (
+	"bytes"
+	"encoding/json"
+	"net/http"
+	"runtime"
+	"strings"
+	"time"
 
-func init() {
-	// init with nil metrics
-	initMetricVars(&MetricSettings{})
-}
+	"github.com/grafana/grafana/pkg/bus"
+	"github.com/grafana/grafana/pkg/models"
+	"github.com/grafana/grafana/pkg/plugins"
+	"github.com/grafana/grafana/pkg/setting"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const exporterName = "grafana"
 
 var (
-	M_Instance_Start                       Counter
-	M_Page_Status_200                      Counter
-	M_Page_Status_500                      Counter
-	M_Page_Status_404                      Counter
-	M_Page_Status_Unknown                  Counter
-	M_Api_Status_200                       Counter
-	M_Api_Status_404                       Counter
-	M_Api_Status_500                       Counter
-	M_Api_Status_Unknown                   Counter
-	M_Proxy_Status_200                     Counter
-	M_Proxy_Status_404                     Counter
-	M_Proxy_Status_500                     Counter
-	M_Proxy_Status_Unknown                 Counter
-	M_Api_User_SignUpStarted               Counter
-	M_Api_User_SignUpCompleted             Counter
-	M_Api_User_SignUpInvite                Counter
-	M_Api_Dashboard_Save                   Timer
-	M_Api_Dashboard_Get                    Timer
-	M_Api_Dashboard_Search                 Timer
-	M_Api_Admin_User_Create                Counter
-	M_Api_Login_Post                       Counter
-	M_Api_Login_OAuth                      Counter
-	M_Api_Org_Create                       Counter
-	M_Api_Dashboard_Snapshot_Create        Counter
-	M_Api_Dashboard_Snapshot_External      Counter
-	M_Api_Dashboard_Snapshot_Get           Counter
-	M_Models_Dashboard_Insert              Counter
-	M_Alerting_Result_State_Alerting       Counter
-	M_Alerting_Result_State_Ok             Counter
-	M_Alerting_Result_State_Paused         Counter
-	M_Alerting_Result_State_NoData         Counter
-	M_Alerting_Result_State_Pending        Counter
-	M_Alerting_Notification_Sent_Slack     Counter
-	M_Alerting_Notification_Sent_Email     Counter
-	M_Alerting_Notification_Sent_Webhook   Counter
-	M_Alerting_Notification_Sent_DingDing  Counter
-	M_Alerting_Notification_Sent_PagerDuty Counter
-	M_Alerting_Notification_Sent_LINE      Counter
-	M_Alerting_Notification_Sent_Victorops Counter
-	M_Alerting_Notification_Sent_OpsGenie  Counter
-	M_Alerting_Notification_Sent_Telegram  Counter
-	M_Alerting_Notification_Sent_Threema   Counter
-	M_Alerting_Notification_Sent_Sensu     Counter
-	M_Alerting_Notification_Sent_Pushover  Counter
-	M_Aws_CloudWatch_GetMetricStatistics   Counter
-	M_Aws_CloudWatch_ListMetrics           Counter
-	M_DB_DataSource_QueryById              Counter
+	M_Instance_Start       prometheus.Counter
+	M_Page_Status          *prometheus.CounterVec
+	M_Api_Status           *prometheus.CounterVec
+	M_Proxy_Status         *prometheus.CounterVec
+	M_Http_Request_Total   *prometheus.CounterVec
+	M_Http_Request_Summary *prometheus.SummaryVec
+
+	M_Api_User_SignUpStarted   prometheus.Counter
+	M_Api_User_SignUpCompleted prometheus.Counter
+	M_Api_User_SignUpInvite    prometheus.Counter
+	M_Api_Dashboard_Save       prometheus.Summary
+	M_Api_Dashboard_Get        prometheus.Summary
+	M_Api_Dashboard_Search     prometheus.Summary
+	M_Api_Admin_User_Create    prometheus.Counter
+	M_Api_Login_Post           prometheus.Counter
+	M_Api_Login_OAuth          prometheus.Counter
+	M_Api_Org_Create           prometheus.Counter
+
+	M_Api_Dashboard_Snapshot_Create      prometheus.Counter
+	M_Api_Dashboard_Snapshot_External    prometheus.Counter
+	M_Api_Dashboard_Snapshot_Get         prometheus.Counter
+	M_Api_Dashboard_Insert               prometheus.Counter
+	M_Alerting_Result_State              *prometheus.CounterVec
+	M_Alerting_Notification_Sent         *prometheus.CounterVec
+	M_Aws_CloudWatch_GetMetricStatistics prometheus.Counter
+	M_Aws_CloudWatch_ListMetrics         prometheus.Counter
+	M_DB_DataSource_QueryById            prometheus.Counter
 
 	// Timers
-	M_DataSource_ProxyReq_Timer Timer
-	M_Alerting_Execution_Time   Timer
+	M_DataSource_ProxyReq_Timer prometheus.Summary
+	M_Alerting_Execution_Time   prometheus.Summary
 
 	// StatTotals
-	M_Alerting_Active_Alerts Gauge
-	M_StatTotal_Dashboards   Gauge
-	M_StatTotal_Users        Gauge
-	M_StatTotal_Orgs         Gauge
-	M_StatTotal_Playlists    Gauge
+	M_Alerting_Active_Alerts prometheus.Gauge
+	M_StatTotal_Dashboards   prometheus.Gauge
+	M_StatTotal_Users        prometheus.Gauge
+	M_StatTotal_Orgs         prometheus.Gauge
+	M_StatTotal_Playlists    prometheus.Gauge
 )
 
+func init() {
+	M_Instance_Start = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "instance_start_total",
+		Help:      "counter for started instances",
+		Namespace: exporterName,
+	})
+
+	M_Page_Status = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name:      "page_response_status_total",
+			Help:      "page http response status",
+			Namespace: exporterName,
+		},
+		[]string{"code"},
+	)
+
+	M_Api_Status = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name:      "api_response_status_total",
+			Help:      "api http response status",
+			Namespace: exporterName,
+		},
+		[]string{"code"},
+	)
+
+	M_Proxy_Status = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name:      "proxy_response_status_total",
+			Help:      "proxy http response status",
+			Namespace: exporterName,
+		},
+		[]string{"code"},
+	)
+
+	M_Http_Request_Total = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "http_request_total",
+			Help: "http request counter",
+		},
+		[]string{"handler", "statuscode", "method"},
+	)
+
+	M_Http_Request_Summary = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name: "http_request_duration_milliseconds",
+			Help: "http request summary",
+		},
+		[]string{"handler", "statuscode", "method"},
+	)
+
+	M_Api_User_SignUpStarted = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_user_signup_started_total",
+		Help:      "amount of users who started the signup flow",
+		Namespace: exporterName,
+	})
+
+	M_Api_User_SignUpCompleted = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_user_signup_completed_total",
+		Help:      "amount of users who completed the signup flow",
+		Namespace: exporterName,
+	})
+
+	M_Api_User_SignUpInvite = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_user_signup_invite_total",
+		Help:      "amount of users who have been invited",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Save = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:      "api_dashboard_save_milliseconds",
+		Help:      "summary for dashboard save duration",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Get = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:      "api_dashboard_get_milliseconds",
+		Help:      "summary for dashboard get duration",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Search = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:      "api_dashboard_search_milliseconds",
+		Help:      "summary for dashboard search duration",
+		Namespace: exporterName,
+	})
+
+	M_Api_Admin_User_Create = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_admin_user_created_total",
+		Help:      "api admin user created counter",
+		Namespace: exporterName,
+	})
+
+	M_Api_Login_Post = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_login_post_total",
+		Help:      "api login post counter",
+		Namespace: exporterName,
+	})
+
+	M_Api_Login_OAuth = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_login_oauth_total",
+		Help:      "api login oauth counter",
+		Namespace: exporterName,
+	})
+
+	M_Api_Org_Create = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_org_create_total",
+		Help:      "api org created counter",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Snapshot_Create = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_dashboard_snapshot_create_total",
+		Help:      "dashboard snapshots created",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Snapshot_External = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_dashboard_snapshot_external_total",
+		Help:      "external dashboard snapshots created",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Snapshot_Get = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_dashboard_snapshot_get_total",
+		Help:      "loaded dashboard snapshots",
+		Namespace: exporterName,
+	})
+
+	M_Api_Dashboard_Insert = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "api_models_dashboard_insert_total",
+		Help:      "dashboards inserted",
+		Namespace: exporterName,
+	})
+
+	M_Alerting_Result_State = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name:      "alerting_result_total",
+		Help:      "alert execution result counter",
+		Namespace: exporterName,
+	}, []string{"state"})
+
+	M_Alerting_Notification_Sent = prometheus.NewCounterVec(prometheus.CounterOpts{
+		Name:      "alerting_notification_sent_total",
+		Help:      "counter for how many alert notifications have been sent",
+		Namespace: exporterName,
+	}, []string{"type"})
+
+	M_Aws_CloudWatch_GetMetricStatistics = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "aws_cloudwatch_get_metric_statistics_total",
+		Help:      "counter for getting metric statistics from aws",
+		Namespace: exporterName,
+	})
+
+	M_Aws_CloudWatch_ListMetrics = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "aws_cloudwatch_list_metrics_total",
+		Help:      "counter for getting list of metrics from aws",
+		Namespace: exporterName,
+	})
+
+	M_DB_DataSource_QueryById = prometheus.NewCounter(prometheus.CounterOpts{
+		Name:      "db_datasource_query_by_id_total",
+		Help:      "counter for getting datasource by id",
+		Namespace: exporterName,
+	})
+
+	M_DataSource_ProxyReq_Timer = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:      "api_dataproxy_request_all_milliseconds",
+		Help:      "summary for data proxy request duration",
+		Namespace: exporterName,
+	})
+
+	M_Alerting_Execution_Time = prometheus.NewSummary(prometheus.SummaryOpts{
+		Name:      "alerting_execution_time_milliseconds",
+		Help:      "summary of alert execution duration",
+		Namespace: exporterName,
+	})
+
+	M_Alerting_Active_Alerts = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:      "alerting_active_alerts",
+		Help:      "amount of active alerts",
+		Namespace: exporterName,
+	})
+
+	M_StatTotal_Dashboards = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:      "stat_totals_dashboard",
+		Help:      "total amount of dashboards",
+		Namespace: exporterName,
+	})
+
+	M_StatTotal_Users = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:      "stat_total_users",
+		Help:      "total amount of users",
+		Namespace: exporterName,
+	})
+
+	M_StatTotal_Orgs = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:      "stat_total_orgs",
+		Help:      "total amount of orgs",
+		Namespace: exporterName,
+	})
+
+	M_StatTotal_Playlists = prometheus.NewGauge(prometheus.GaugeOpts{
+		Name:      "stat_total_playlists",
+		Help:      "total amount of playlists",
+		Namespace: exporterName,
+	})
+}
+
 func initMetricVars(settings *MetricSettings) {
-	UseNilMetrics = settings.Enabled == false
-	MetricStats = NewRegistry()
-
-	M_Instance_Start = RegCounter("instance_start")
-
-	M_Page_Status_200 = RegCounter("page.resp_status", "code", "200")
-	M_Page_Status_500 = RegCounter("page.resp_status", "code", "500")
-	M_Page_Status_404 = RegCounter("page.resp_status", "code", "404")
-	M_Page_Status_Unknown = RegCounter("page.resp_status", "code", "unknown")
-
-	M_Api_Status_200 = RegCounter("api.resp_status", "code", "200")
-	M_Api_Status_404 = RegCounter("api.resp_status", "code", "404")
-	M_Api_Status_500 = RegCounter("api.resp_status", "code", "500")
-	M_Api_Status_Unknown = RegCounter("api.resp_status", "code", "unknown")
-
-	M_Proxy_Status_200 = RegCounter("proxy.resp_status", "code", "200")
-	M_Proxy_Status_404 = RegCounter("proxy.resp_status", "code", "404")
-	M_Proxy_Status_500 = RegCounter("proxy.resp_status", "code", "500")
-	M_Proxy_Status_Unknown = RegCounter("proxy.resp_status", "code", "unknown")
-
-	M_Api_User_SignUpStarted = RegCounter("api.user.signup_started")
-	M_Api_User_SignUpCompleted = RegCounter("api.user.signup_completed")
-	M_Api_User_SignUpInvite = RegCounter("api.user.signup_invite")
-
-	M_Api_Dashboard_Save = RegTimer("api.dashboard.save")
-	M_Api_Dashboard_Get = RegTimer("api.dashboard.get")
-	M_Api_Dashboard_Search = RegTimer("api.dashboard.search")
-
-	M_Api_Admin_User_Create = RegCounter("api.admin.user_create")
-	M_Api_Login_Post = RegCounter("api.login.post")
-	M_Api_Login_OAuth = RegCounter("api.login.oauth")
-	M_Api_Org_Create = RegCounter("api.org.create")
-
-	M_Api_Dashboard_Snapshot_Create = RegCounter("api.dashboard_snapshot.create")
-	M_Api_Dashboard_Snapshot_External = RegCounter("api.dashboard_snapshot.external")
-	M_Api_Dashboard_Snapshot_Get = RegCounter("api.dashboard_snapshot.get")
-
-	M_Models_Dashboard_Insert = RegCounter("models.dashboard.insert")
-
-	M_Alerting_Result_State_Alerting = RegCounter("alerting.result", "state", "alerting")
-	M_Alerting_Result_State_Ok = RegCounter("alerting.result", "state", "ok")
-	M_Alerting_Result_State_Paused = RegCounter("alerting.result", "state", "paused")
-	M_Alerting_Result_State_NoData = RegCounter("alerting.result", "state", "no_data")
-	M_Alerting_Result_State_Pending = RegCounter("alerting.result", "state", "pending")
-
-	M_Alerting_Notification_Sent_Slack = RegCounter("alerting.notifications_sent", "type", "slack")
-	M_Alerting_Notification_Sent_Email = RegCounter("alerting.notifications_sent", "type", "email")
-	M_Alerting_Notification_Sent_Webhook = RegCounter("alerting.notifications_sent", "type", "webhook")
-	M_Alerting_Notification_Sent_DingDing = RegCounter("alerting.notifications_sent", "type", "dingding")
-	M_Alerting_Notification_Sent_PagerDuty = RegCounter("alerting.notifications_sent", "type", "pagerduty")
-	M_Alerting_Notification_Sent_Victorops = RegCounter("alerting.notifications_sent", "type", "victorops")
-	M_Alerting_Notification_Sent_OpsGenie = RegCounter("alerting.notifications_sent", "type", "opsgenie")
-	M_Alerting_Notification_Sent_Telegram = RegCounter("alerting.notifications_sent", "type", "telegram")
-	M_Alerting_Notification_Sent_Threema = RegCounter("alerting.notifications_sent", "type", "threema")
-	M_Alerting_Notification_Sent_Sensu = RegCounter("alerting.notifications_sent", "type", "sensu")
-	M_Alerting_Notification_Sent_LINE = RegCounter("alerting.notifications_sent", "type", "LINE")
-	M_Alerting_Notification_Sent_Pushover = RegCounter("alerting.notifications_sent", "type", "pushover")
-
-	M_Aws_CloudWatch_GetMetricStatistics = RegCounter("aws.cloudwatch.get_metric_statistics")
-	M_Aws_CloudWatch_ListMetrics = RegCounter("aws.cloudwatch.list_metrics")
-
-	M_DB_DataSource_QueryById = RegCounter("db.datasource.query_by_id")
+	prometheus.MustRegister(
+		M_Instance_Start,
+		M_Page_Status,
+		M_Api_Status,
+		M_Proxy_Status,
+		M_Http_Request_Total,
+		M_Http_Request_Summary,
+		M_Api_User_SignUpStarted,
+		M_Api_User_SignUpCompleted,
+		M_Api_User_SignUpInvite,
+		M_Api_Dashboard_Save,
+		M_Api_Dashboard_Get,
+		M_Api_Dashboard_Search,
+		M_DataSource_ProxyReq_Timer,
+		M_Alerting_Execution_Time,
+		M_Api_Admin_User_Create,
+		M_Api_Login_Post,
+		M_Api_Login_OAuth,
+		M_Api_Org_Create,
+		M_Api_Dashboard_Snapshot_Create,
+		M_Api_Dashboard_Snapshot_External,
+		M_Api_Dashboard_Snapshot_Get,
+		M_Api_Dashboard_Insert,
+		M_Alerting_Result_State,
+		M_Alerting_Notification_Sent,
+		M_Aws_CloudWatch_GetMetricStatistics,
+		M_Aws_CloudWatch_ListMetrics,
+		M_DB_DataSource_QueryById,
+		M_Alerting_Active_Alerts,
+		M_StatTotal_Dashboards,
+		M_StatTotal_Users,
+		M_StatTotal_Orgs,
+		M_StatTotal_Playlists)
 
-	// Timers
-	M_DataSource_ProxyReq_Timer = RegTimer("api.dataproxy.request.all")
-	M_Alerting_Execution_Time = RegTimer("alerting.execution_time")
+	go instrumentationLoop(settings)
+}
 
-	// StatTotals
-	M_Alerting_Active_Alerts = RegGauge("alerting.active_alerts")
-	M_StatTotal_Dashboards = RegGauge("stat_totals", "stat", "dashboards")
-	M_StatTotal_Users = RegGauge("stat_totals", "stat", "users")
-	M_StatTotal_Orgs = RegGauge("stat_totals", "stat", "orgs")
-	M_StatTotal_Playlists = RegGauge("stat_totals", "stat", "playlists")
+func instrumentationLoop(settings *MetricSettings) chan struct{} {
+	M_Instance_Start.Inc()
+
+	onceEveryDayTick := time.NewTicker(time.Hour * 24)
+	secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
+
+	for {
+		select {
+		case <-onceEveryDayTick.C:
+			sendUsageStats()
+		case <-secondTicker.C:
+			updateTotalStats()
+		}
+	}
+}
+
+var metricPublishCounter int64 = 0
+
+func updateTotalStats() {
+	metricPublishCounter++
+	if metricPublishCounter == 1 || metricPublishCounter%10 == 0 {
+		statsQuery := models.GetSystemStatsQuery{}
+		if err := bus.Dispatch(&statsQuery); err != nil {
+			metricsLogger.Error("Failed to get system stats", "error", err)
+			return
+		}
+
+		M_StatTotal_Dashboards.Set(float64(statsQuery.Result.Dashboards))
+		M_StatTotal_Users.Set(float64(statsQuery.Result.Users))
+		M_StatTotal_Playlists.Set(float64(statsQuery.Result.Playlists))
+		M_StatTotal_Orgs.Set(float64(statsQuery.Result.Orgs))
+	}
+}
+
+func sendUsageStats() {
+	if !setting.ReportingEnabled {
+		return
+	}
+
+	metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
+
+	version := strings.Replace(setting.BuildVersion, ".", "_", -1)
+
+	metrics := map[string]interface{}{}
+	report := map[string]interface{}{
+		"version": version,
+		"metrics": metrics,
+		"os":      runtime.GOOS,
+		"arch":    runtime.GOARCH,
+	}
+
+	statsQuery := models.GetSystemStatsQuery{}
+	if err := bus.Dispatch(&statsQuery); err != nil {
+		metricsLogger.Error("Failed to get system stats", "error", err)
+		return
+	}
+
+	metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
+	metrics["stats.users.count"] = statsQuery.Result.Users
+	metrics["stats.orgs.count"] = statsQuery.Result.Orgs
+	metrics["stats.playlist.count"] = statsQuery.Result.Playlists
+	metrics["stats.plugins.apps.count"] = len(plugins.Apps)
+	metrics["stats.plugins.panels.count"] = len(plugins.Panels)
+	metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
+	metrics["stats.alerts.count"] = statsQuery.Result.Alerts
+	metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
+	metrics["stats.datasources.count"] = statsQuery.Result.Datasources
+
+	dsStats := models.GetDataSourceStatsQuery{}
+	if err := bus.Dispatch(&dsStats); err != nil {
+		metricsLogger.Error("Failed to get datasource stats", "error", err)
+		return
+	}
+
+	// send counters for each data source
+	// but ignore any custom data sources
+	// as sending that name could be sensitive information
+	dsOtherCount := 0
+	for _, dsStat := range dsStats.Result {
+		if models.IsKnownDataSourcePlugin(dsStat.Type) {
+			metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
+		} else {
+			dsOtherCount += dsStat.Count
+		}
+	}
+	metrics["stats.ds.other.count"] = dsOtherCount
+
+	out, _ := json.MarshalIndent(report, "", " ")
+	data := bytes.NewBuffer(out)
+
+	client := http.Client{Timeout: time.Duration(5 * time.Second)}
+	go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
 }
 }
+ 0 - 135
pkg/metrics/publish.go

@@ -1,135 +0,0 @@
-package metrics
-
-import (
-	"bytes"
-	"encoding/json"
-	"net/http"
-	"runtime"
-	"strings"
-	"time"
-
-	"github.com/grafana/grafana/pkg/bus"
-	"github.com/grafana/grafana/pkg/log"
-	m "github.com/grafana/grafana/pkg/models"
-	"github.com/grafana/grafana/pkg/plugins"
-	"github.com/grafana/grafana/pkg/setting"
-)
-
-var metricsLogger log.Logger = log.New("metrics")
-var metricPublishCounter int64 = 0
-
-func Init() {
-	settings := readSettings()
-	initMetricVars(settings)
-	go instrumentationLoop(settings)
-}
-
-func instrumentationLoop(settings *MetricSettings) chan struct{} {
-	M_Instance_Start.Inc(1)
-
-	onceEveryDayTick := time.NewTicker(time.Hour * 24)
-	secondTicker := time.NewTicker(time.Second * time.Duration(settings.IntervalSeconds))
-
-	for {
-		select {
-		case <-onceEveryDayTick.C:
-			sendUsageStats()
-		case <-secondTicker.C:
-			if settings.Enabled {
-				sendMetrics(settings)
-			}
-		}
-	}
-}
-
-func sendMetrics(settings *MetricSettings) {
-	if len(settings.Publishers) == 0 {
-		return
-	}
-
-	updateTotalStats()
-
-	metrics := MetricStats.GetSnapshots()
-	for _, publisher := range settings.Publishers {
-		publisher.Publish(metrics)
-	}
-}
-
-func updateTotalStats() {
-
-	// every interval also publish totals
-	metricPublishCounter++
-	if metricPublishCounter%10 == 0 {
-		// get stats
-		statsQuery := m.GetSystemStatsQuery{}
-		if err := bus.Dispatch(&statsQuery); err != nil {
-			metricsLogger.Error("Failed to get system stats", "error", err)
-			return
-		}
-
-		M_StatTotal_Dashboards.Update(statsQuery.Result.Dashboards)
-		M_StatTotal_Users.Update(statsQuery.Result.Users)
-		M_StatTotal_Playlists.Update(statsQuery.Result.Playlists)
-		M_StatTotal_Orgs.Update(statsQuery.Result.Orgs)
-	}
-}
-
-func sendUsageStats() {
-	if !setting.ReportingEnabled {
-		return
-	}
-
-	metricsLogger.Debug("Sending anonymous usage stats to stats.grafana.org")
-
-	version := strings.Replace(setting.BuildVersion, ".", "_", -1)
-
-	metrics := map[string]interface{}{}
-	report := map[string]interface{}{
-		"version": version,
-		"metrics": metrics,
-		"os":      runtime.GOOS,
-		"arch":    runtime.GOARCH,
-	}
-
-	statsQuery := m.GetSystemStatsQuery{}
-	if err := bus.Dispatch(&statsQuery); err != nil {
-		metricsLogger.Error("Failed to get system stats", "error", err)
-		return
-	}
-
-	metrics["stats.dashboards.count"] = statsQuery.Result.Dashboards
-	metrics["stats.users.count"] = statsQuery.Result.Users
-	metrics["stats.orgs.count"] = statsQuery.Result.Orgs
-	metrics["stats.playlist.count"] = statsQuery.Result.Playlists
-	metrics["stats.plugins.apps.count"] = len(plugins.Apps)
-	metrics["stats.plugins.panels.count"] = len(plugins.Panels)
-	metrics["stats.plugins.datasources.count"] = len(plugins.DataSources)
-	metrics["stats.alerts.count"] = statsQuery.Result.Alerts
-	metrics["stats.active_users.count"] = statsQuery.Result.ActiveUsers
-	metrics["stats.datasources.count"] = statsQuery.Result.Datasources
-
-	dsStats := m.GetDataSourceStatsQuery{}
-	if err := bus.Dispatch(&dsStats); err != nil {
-		metricsLogger.Error("Failed to get datasource stats", "error", err)
-		return
-	}
-
-	// send counters for each data source
-	// but ignore any custom data sources
-	// as sending that name could be sensitive information
-	dsOtherCount := 0
-	for _, dsStat := range dsStats.Result {
-		if m.IsKnownDataSourcePlugin(dsStat.Type) {
-			metrics["stats.ds."+dsStat.Type+".count"] = dsStat.Count
-		} else {
-			dsOtherCount += dsStat.Count
-		}
-	}
-	metrics["stats.ds.other.count"] = dsOtherCount
-
-	out, _ := json.MarshalIndent(report, "", " ")
-	data := bytes.NewBuffer(out)
-
-	client := http.Client{Timeout: time.Duration(5 * time.Second)}
-	go client.Post("https://stats.grafana.org/grafana-usage-report", "application/json", data)
-}

+ 0 - 37
pkg/metrics/registry.go

@@ -1,37 +0,0 @@
-package metrics
-
-import "sync"
-
-type Registry interface {
-	GetSnapshots() []Metric
-	Register(metric Metric)
-}
-
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
-type StandardRegistry struct {
-	metrics []Metric
-	mutex   sync.Mutex
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
-	return &StandardRegistry{
-		metrics: make([]Metric, 0),
-	}
-}
-
-func (r *StandardRegistry) Register(metric Metric) {
-	r.mutex.Lock()
-	defer r.mutex.Unlock()
-	r.metrics = append(r.metrics, metric)
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) GetSnapshots() []Metric {
-	metrics := make([]Metric, len(r.metrics))
-	for i, metric := range r.metrics {
-		metrics[i] = metric.Snapshot()
-	}
-	return metrics
-}

+ 0 - 607
pkg/metrics/sample.go

@@ -1,607 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import (
-	"math"
-	"math/rand"
-	"sort"
-	"sync"
-	"time"
-)
-
-const rescaleThreshold = time.Hour
-
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
-	Clear()
-	Count() int64
-	Max() int64
-	Mean() float64
-	Min() int64
-	Percentile(float64) float64
-	Percentiles([]float64) []float64
-	Size() int
-	Snapshot() Sample
-	StdDev() float64
-	Sum() int64
-	Update(int64)
-	Values() []int64
-	Variance() float64
-}
-
-// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
-// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
-// Decay Model for Streaming Systems".
-//
-// <http://www.research.att.com/people/Cormode_Graham/library/publications/CormodeShkapenyukSrivastavaXu09.pdf>
-type ExpDecaySample struct {
-	alpha         float64
-	count         int64
-	mutex         sync.Mutex
-	reservoirSize int
-	t0, t1        time.Time
-	values        *expDecaySampleHeap
-}
-
-// NewExpDecaySample constructs a new exponentially-decaying sample with the
-// given reservoir size and alpha.
-func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
-	s := &ExpDecaySample{
-		alpha:         alpha,
-		reservoirSize: reservoirSize,
-		t0:            time.Now(),
-		values:        newExpDecaySampleHeap(reservoirSize),
-	}
-	s.t1 = s.t0.Add(rescaleThreshold)
-	return s
-}
-
-// Clear clears all samples.
-func (s *ExpDecaySample) Clear() {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	s.count = 0
-	s.t0 = time.Now()
-	s.t1 = s.t0.Add(rescaleThreshold)
-	s.values.Clear()
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
-	return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
-	return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
-	return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
-	return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
-	return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return s.values.Size()
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	vals := s.values.Values()
-	values := make([]int64, len(vals))
-	for i, v := range vals {
-		values[i] = v.v
-	}
-	return &SampleSnapshot{
-		count:  s.count,
-		values: values,
-	}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
-	return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
-	return SampleSum(s.Values())
-}
-
-// Update samples a new value.
-func (s *ExpDecaySample) Update(v int64) {
-	s.update(time.Now(), v)
-}
-
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	vals := s.values.Values()
-	values := make([]int64, len(vals))
-	for i, v := range vals {
-		values[i] = v.v
-	}
-	return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
-	return SampleVariance(s.Values())
-}
-
-// update samples a new value at a particular timestamp.  This is a method all
-// its own to facilitate testing.
-func (s *ExpDecaySample) update(t time.Time, v int64) {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	s.count++
-	if s.values.Size() == s.reservoirSize {
-		s.values.Pop()
-	}
-	s.values.Push(expDecaySample{
-		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
-		v: v,
-	})
-	if t.After(s.t1) {
-		values := s.values.Values()
-		t0 := s.t0
-		s.values.Clear()
-		s.t0 = t
-		s.t1 = s.t0.Add(rescaleThreshold)
-		for _, v := range values {
-			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
-			s.values.Push(v)
-		}
-	}
-}
-
-// NilSample is a no-op Sample.
-type NilSample struct{}
-
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
-	return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Sample is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var max int64 = math.MinInt64
-	for _, v := range values {
-		if max < v {
-			max = v
-		}
-	}
-	return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
-	if 0 == len(values) {
-		return 0.0
-	}
-	return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
-	if 0 == len(values) {
-		return 0
-	}
-	var min int64 = math.MaxInt64
-	for _, v := range values {
-		if min > v {
-			min = v
-		}
-	}
-	return min
-}
-
-// SamplePercentiles returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
-	return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
-	scores := make([]float64, len(ps))
-	size := len(values)
-	if size > 0 {
-		sort.Sort(values)
-		for i, p := range ps {
-			pos := p * float64(size+1)
-			if pos < 1.0 {
-				scores[i] = float64(values[0])
-			} else if pos >= float64(size) {
-				scores[i] = float64(values[size-1])
-			} else {
-				lower := float64(values[int(pos)-1])
-				upper := float64(values[int(pos)])
-				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
-			}
-		}
-	}
-	return scores
-}
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
-	count  int64
-	values []int64
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
-	panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
-	return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values at the time
-// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
-	return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
-
-// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
-
-// StdDev returns the standard deviation of values at the time the snapshot was
-// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
-
-// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
-	panic("Update called on a SampleSnapshot")
-}
-
-// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
-	values := make([]int64, len(s.values))
-	copy(values, s.values)
-	return values
-}
-
-// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
-	return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
-	var sum int64
-	for _, v := range values {
-		sum += v
-	}
-	return sum
-}
-
-// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
-	if 0 == len(values) {
-		return 0.0
-	}
-	m := SampleMean(values)
-	var sum float64
-	for _, v := range values {
-		d := float64(v) - m
-		sum += d * d
-	}
-	return sum / float64(len(values))
-}
-
-// A uniform sample using Vitter's Algorithm R.
-//
-// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
-type UniformSample struct {
-	count         int64
-	mutex         sync.Mutex
-	reservoirSize int
-	values        []int64
-}
-
-// NewUniformSample constructs a new uniform sample with the given reservoir
-// size.
-func NewUniformSample(reservoirSize int) Sample {
-	return &UniformSample{
-		reservoirSize: reservoirSize,
-		values:        make([]int64, 0, reservoirSize),
-	}
-}
-
-// Clear clears all samples.
-func (s *UniformSample) Clear() {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	s.count = 0
-	s.values = make([]int64, 0, s.reservoirSize)
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return len(s.values)
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	values := make([]int64, len(s.values))
-	copy(values, s.values)
-	return &SampleSnapshot{
-		count:  s.count,
-		values: values,
-	}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleSum(s.values)
-}
-
-// Update samples a new value.
-func (s *UniformSample) Update(v int64) {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	s.count++
-	if len(s.values) < s.reservoirSize {
-		s.values = append(s.values, v)
-	} else {
-		r := rand.Int63n(s.count)
-		if r < int64(len(s.values)) {
-			s.values[int(r)] = v
-		}
-	}
-}
-
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	values := make([]int64, len(s.values))
-	copy(values, s.values)
-	return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
-	s.mutex.Lock()
-	defer s.mutex.Unlock()
-	return SampleVariance(s.values)
-}
-
-// expDecaySample represents an individual sample in a heap.
-type expDecaySample struct {
-	k float64
-	v int64
-}
-
-func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
-	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
-}
-
-// expDecaySampleHeap is a min-heap of expDecaySamples.
-// The internal implementation is copied from the standard library's container/heap
-type expDecaySampleHeap struct {
-	s []expDecaySample
-}
-
-func (h *expDecaySampleHeap) Clear() {
-	h.s = h.s[:0]
-}
-
-func (h *expDecaySampleHeap) Push(s expDecaySample) {
-	n := len(h.s)
-	h.s = h.s[0 : n+1]
-	h.s[n] = s
-	h.up(n)
-}
-
-func (h *expDecaySampleHeap) Pop() expDecaySample {
-	n := len(h.s) - 1
-	h.s[0], h.s[n] = h.s[n], h.s[0]
-	h.down(0, n)
-
-	n = len(h.s)
-	s := h.s[n-1]
-	h.s = h.s[0 : n-1]
-	return s
-}
-
-func (h *expDecaySampleHeap) Size() int {
-	return len(h.s)
-}
-
-func (h *expDecaySampleHeap) Values() []expDecaySample {
-	return h.s
-}
-
-func (h *expDecaySampleHeap) up(j int) {
-	for {
-		i := (j - 1) / 2 // parent
-		if i == j || !(h.s[j].k < h.s[i].k) {
-			break
-		}
-		h.s[i], h.s[j] = h.s[j], h.s[i]
-		j = i
-	}
-}
-
-func (h *expDecaySampleHeap) down(i, n int) {
-	for {
-		j1 := 2*i + 1
-		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
-			break
-		}
-		j := j1 // left child
-		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
-			j = j2 // = 2*i + 2  // right child
-		}
-		if !(h.s[j].k < h.s[i].k) {
-			break
-		}
-		h.s[i], h.s[j] = h.s[j], h.s[i]
-		i = j
-	}
-}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int           { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

+ 0 - 367
pkg/metrics/sample_test.go

@@ -1,367 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import (
-	"math/rand"
-	"runtime"
-	"testing"
-	"time"
-)
-
-// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
-// expensive computations like Variance, the cost of copying the Sample, as
-// approximated by a make and copy, is much greater than the cost of the
-// computation for small samples and only slightly less for large samples.
-func BenchmarkCompute1000(b *testing.B) {
-	s := make([]int64, 1000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		SampleVariance(s)
-	}
-}
-func BenchmarkCompute1000000(b *testing.B) {
-	s := make([]int64, 1000000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		SampleVariance(s)
-	}
-}
-func BenchmarkCopy1000(b *testing.B) {
-	s := make([]int64, 1000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		sCopy := make([]int64, len(s))
-		copy(sCopy, s)
-	}
-}
-func BenchmarkCopy1000000(b *testing.B) {
-	s := make([]int64, 1000000)
-	for i := 0; i < len(s); i++ {
-		s[i] = int64(i)
-	}
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		sCopy := make([]int64, len(s))
-		copy(sCopy, s)
-	}
-}
-
-func BenchmarkExpDecaySample257(b *testing.B) {
-	benchmarkSample(b, NewExpDecaySample(257, 0.015))
-}
-
-func BenchmarkExpDecaySample514(b *testing.B) {
-	benchmarkSample(b, NewExpDecaySample(514, 0.015))
-}
-
-func BenchmarkExpDecaySample1028(b *testing.B) {
-	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
-}
-
-func BenchmarkUniformSample257(b *testing.B) {
-	benchmarkSample(b, NewUniformSample(257))
-}
-
-func BenchmarkUniformSample514(b *testing.B) {
-	benchmarkSample(b, NewUniformSample(514))
-}
-
-func BenchmarkUniformSample1028(b *testing.B) {
-	benchmarkSample(b, NewUniformSample(1028))
-}
-
-func TestExpDecaySample10(t *testing.T) {
-	rand.Seed(1)
-	s := NewExpDecaySample(100, 0.99)
-	for i := 0; i < 10; i++ {
-		s.Update(int64(i))
-	}
-	if size := s.Count(); 10 != size {
-		t.Errorf("s.Count(): 10 != %v\n", size)
-	}
-	if size := s.Size(); 10 != size {
-		t.Errorf("s.Size(): 10 != %v\n", size)
-	}
-	if l := len(s.Values()); 10 != l {
-		t.Errorf("len(s.Values()): 10 != %v\n", l)
-	}
-	for _, v := range s.Values() {
-		if v > 10 || v < 0 {
-			t.Errorf("out of range [0, 10): %v\n", v)
-		}
-	}
-}
-
-func TestExpDecaySample100(t *testing.T) {
-	rand.Seed(1)
-	s := NewExpDecaySample(1000, 0.01)
-	for i := 0; i < 100; i++ {
-		s.Update(int64(i))
-	}
-	if size := s.Count(); 100 != size {
-		t.Errorf("s.Count(): 100 != %v\n", size)
-	}
-	if size := s.Size(); 100 != size {
-		t.Errorf("s.Size(): 100 != %v\n", size)
-	}
-	if l := len(s.Values()); 100 != l {
-		t.Errorf("len(s.Values()): 100 != %v\n", l)
-	}
-	for _, v := range s.Values() {
-		if v > 100 || v < 0 {
-			t.Errorf("out of range [0, 100): %v\n", v)
-		}
-	}
-}
-
-func TestExpDecaySample1000(t *testing.T) {
-	rand.Seed(1)
-	s := NewExpDecaySample(100, 0.99)
-	for i := 0; i < 1000; i++ {
-		s.Update(int64(i))
-	}
-	if size := s.Count(); 1000 != size {
-		t.Errorf("s.Count(): 1000 != %v\n", size)
-	}
-	if size := s.Size(); 100 != size {
-		t.Errorf("s.Size(): 100 != %v\n", size)
-	}
-	if l := len(s.Values()); 100 != l {
-		t.Errorf("len(s.Values()): 100 != %v\n", l)
-	}
-	for _, v := range s.Values() {
-		if v > 1000 || v < 0 {
-			t.Errorf("out of range [0, 1000): %v\n", v)
-		}
-	}
-}
-
-// This test makes sure that the sample's priority is not amplified by using
-// nanosecond duration since start rather than second duration since start.
-// The priority becomes +Inf quickly after starting if this is done,
-// effectively freezing the set of samples until a rescale step happens.
-func TestExpDecaySampleNanosecondRegression(t *testing.T) {
-	rand.Seed(1)
-	s := NewExpDecaySample(100, 0.99)
-	for i := 0; i < 100; i++ {
-		s.Update(10)
-	}
-	time.Sleep(1 * time.Millisecond)
-	for i := 0; i < 100; i++ {
-		s.Update(20)
-	}
-	v := s.Values()
-	avg := float64(0)
-	for i := 0; i < len(v); i++ {
-		avg += float64(v[i])
-	}
-	avg /= float64(len(v))
-	if avg > 16 || avg < 14 {
-		t.Errorf("out of range [14, 16]: %v\n", avg)
-	}
-}
-
-func TestExpDecaySampleRescale(t *testing.T) {
-	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
-	s.update(time.Now(), 1)
-	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
-	for _, v := range s.values.Values() {
-		if v.k == 0.0 {
-			t.Fatal("v.k == 0.0")
-		}
-	}
-}
-
-func TestExpDecaySampleSnapshot(t *testing.T) {
-	now := time.Now()
-	rand.Seed(1)
-	s := NewExpDecaySample(100, 0.99)
-	for i := 1; i <= 10000; i++ {
-		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
-	}
-	snapshot := s.Snapshot()
-	s.Update(1)
-	testExpDecaySampleStatistics(t, snapshot)
-}
-
-func TestExpDecaySampleStatistics(t *testing.T) {
-	now := time.Now()
-	rand.Seed(1)
-	s := NewExpDecaySample(100, 0.99)
-	for i := 1; i <= 10000; i++ {
-		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
-	}
-	testExpDecaySampleStatistics(t, s)
-}
-
-func TestUniformSample(t *testing.T) {
-	rand.Seed(1)
-	s := NewUniformSample(100)
-	for i := 0; i < 1000; i++ {
-		s.Update(int64(i))
-	}
-	if size := s.Count(); 1000 != size {
-		t.Errorf("s.Count(): 1000 != %v\n", size)
-	}
-	if size := s.Size(); 100 != size {
-		t.Errorf("s.Size(): 100 != %v\n", size)
-	}
-	if l := len(s.Values()); 100 != l {
-		t.Errorf("len(s.Values()): 100 != %v\n", l)
-	}
-	for _, v := range s.Values() {
-		if v > 1000 || v < 0 {
-			t.Errorf("out of range [0, 100): %v\n", v)
-		}
-	}
-}
-
-func TestUniformSampleIncludesTail(t *testing.T) {
-	rand.Seed(1)
-	s := NewUniformSample(100)
-	max := 100
-	for i := 0; i < max; i++ {
-		s.Update(int64(i))
-	}
-	v := s.Values()
-	sum := 0
-	exp := (max - 1) * max / 2
-	for i := 0; i < len(v); i++ {
-		sum += int(v[i])
-	}
-	if exp != sum {
-		t.Errorf("sum: %v != %v\n", exp, sum)
-	}
-}
-
-func TestUniformSampleSnapshot(t *testing.T) {
-	s := NewUniformSample(100)
-	for i := 1; i <= 10000; i++ {
-		s.Update(int64(i))
-	}
-	snapshot := s.Snapshot()
-	s.Update(1)
-	testUniformSampleStatistics(t, snapshot)
-}
-
-func TestUniformSampleStatistics(t *testing.T) {
-	rand.Seed(1)
-	s := NewUniformSample(100)
-	for i := 1; i <= 10000; i++ {
-		s.Update(int64(i))
-	}
-	testUniformSampleStatistics(t, s)
-}
-
-func benchmarkSample(b *testing.B, s Sample) {
-	var memStats runtime.MemStats
-	runtime.ReadMemStats(&memStats)
-	pauseTotalNs := memStats.PauseTotalNs
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		s.Update(1)
-	}
-	b.StopTimer()
-	runtime.GC()
-	runtime.ReadMemStats(&memStats)
-	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
-}
-
-func testExpDecaySampleStatistics(t *testing.T, s Sample) {
-	if count := s.Count(); 10000 != count {
-		t.Errorf("s.Count(): 10000 != %v\n", count)
-	}
-	if min := s.Min(); 107 != min {
-		t.Errorf("s.Min(): 107 != %v\n", min)
-	}
-	if max := s.Max(); 10000 != max {
-		t.Errorf("s.Max(): 10000 != %v\n", max)
-	}
-	if mean := s.Mean(); 4965.98 != mean {
-		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
-	}
-	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
-		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
-	}
-	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 4615 != ps[0] {
-		t.Errorf("median: 4615 != %v\n", ps[0])
-	}
-	if 7672 != ps[1] {
-		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
-	}
-	if 9998.99 != ps[2] {
-		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
-	}
-}
-
-func testUniformSampleStatistics(t *testing.T, s Sample) {
-	if count := s.Count(); 10000 != count {
-		t.Errorf("s.Count(): 10000 != %v\n", count)
-	}
-	if min := s.Min(); 37 != min {
-		t.Errorf("s.Min(): 37 != %v\n", min)
-	}
-	if max := s.Max(); 9989 != max {
-		t.Errorf("s.Max(): 9989 != %v\n", max)
-	}
-	if mean := s.Mean(); 4748.14 != mean {
-		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
-	}
-	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
-		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
-	}
-	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
-	if 4599 != ps[0] {
-		t.Errorf("median: 4599 != %v\n", ps[0])
-	}
-	if 7380.5 != ps[1] {
-		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
-	}
-	if 9986.429999999998 != ps[2] {
-		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
-	}
-}
-
-// TestUniformSampleConcurrentUpdateCount would expose data race problems with
-// concurrent Update and Count calls on Sample when test is called with -race
-// argument
-func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping in short mode")
-	}
-	s := NewUniformSample(100)
-	for i := 0; i < 100; i++ {
-		s.Update(int64(i))
-	}
-	quit := make(chan struct{})
-	go func() {
-		t := time.NewTicker(10 * time.Millisecond)
-		for {
-			select {
-			case <-t.C:
-				s.Update(rand.Int63())
-			case <-quit:
-				t.Stop()
-				return
-			}
-		}
-	}()
-	for i := 0; i < 1000; i++ {
-		s.Count()
-		time.Sleep(5 * time.Millisecond)
-	}
-	quit <- struct{}{}
-}

+ 53 - 17
pkg/metrics/settings.go

@@ -1,25 +1,27 @@
 package metrics
 
-import "github.com/grafana/grafana/pkg/setting"
+import (
+	"strings"
+	"time"
 
-type MetricPublisher interface {
-	Publish(metrics []Metric)
-}
+	"github.com/grafana/grafana/pkg/metrics/graphitebridge"
+	"github.com/grafana/grafana/pkg/setting"
+	"github.com/prometheus/client_golang/prometheus"
+	ini "gopkg.in/ini.v1"
+)
 
 type MetricSettings struct {
-	Enabled         bool
-	IntervalSeconds int64
-
-	Publishers []MetricPublisher
+	Enabled              bool
+	IntervalSeconds      int64
+	GraphiteBridgeConfig *graphitebridge.Config
 }
 
-func readSettings() *MetricSettings {
+func ReadSettings(file *ini.File) *MetricSettings {
 	var settings = &MetricSettings{
-		Enabled:    false,
-		Publishers: make([]MetricPublisher, 0),
+		Enabled: false,
 	}
 
-	var section, err = setting.Cfg.GetSection("metrics")
+	var section, err = file.GetSection("metrics")
 	if err != nil {
 		metricsLogger.Crit("Unable to find metrics config section", "error", err)
 		return nil
@@ -32,12 +34,46 @@ func readSettings() *MetricSettings {
 		return settings
 	}
 
-	if graphitePublisher, err := CreateGraphitePublisher(); err != nil {
-		metricsLogger.Error("Failed to init Graphite metric publisher", "error", err)
-	} else if graphitePublisher != nil {
-		metricsLogger.Info("Metrics publisher initialized", "type", "graphite")
-		settings.Publishers = append(settings.Publishers, graphitePublisher)
+	cfg, err := parseGraphiteSettings(settings, file)
+	if err != nil {
+		metricsLogger.Crit("Unable to parse metrics graphite section", "error", err)
+		return nil
 	}
 	}
 
+
 	return settings
 	return settings
 }
+func parseGraphiteSettings(settings *MetricSettings, file *ini.File) (*graphitebridge.Config, error) {
+	graphiteSection, err := file.GetSection("metrics.graphite")
+	if err != nil {
+		return nil, nil
+	}
+
+	address := graphiteSection.Key("address").String()
+	if address == "" {
+		return nil, nil
+	}
+
+	cfg := &graphitebridge.Config{
+		URL:             address,
+		Prefix:          graphiteSection.Key("prefix").MustString("prod.grafana.%(instance_name)s"),
+		CountersAsDelta: true,
+		Gatherer:        prometheus.DefaultGatherer,
+		Interval:        time.Duration(settings.IntervalSeconds) * time.Second,
+		Timeout:         10 * time.Second,
+		Logger:          &logWrapper{logger: metricsLogger},
+		ErrorHandling:   graphitebridge.ContinueOnError,
+	}
+
+	safeInstanceName := strings.Replace(setting.InstanceName, ".", "_", -1)
+	prefix := graphiteSection.Key("prefix").Value()
+
+	if prefix == "" {
+		prefix = "prod.grafana.%(instance_name)s."
+	}
+
+	cfg.Prefix = strings.Replace(prefix, "%(instance_name)s", safeInstanceName, -1)
+	return cfg, nil
+}
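
Example config (not part of the diff) of the section this parser reads. The [metrics.graphite] keys match the code above; the [metrics] key names are assumptions based on the MetricSettings fields, since that part of the section is not shown in this hunk.

	[metrics]
	# key names assumed
	enabled = true
	interval_seconds = 10

	[metrics.graphite]
	# leave address empty (or omit the section) to disable the graphite bridge
	address = localhost:2003
	prefix = prod.grafana.%(instance_name)s.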

+ 0 - 310
pkg/metrics/timer.go

@@ -1,310 +0,0 @@
-// includes code from
-// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
-// Copyright 2012 Richard Crowley. All rights reserved.
-
-package metrics
-
-import (
-	"sync"
-	"time"
-)
-
-// Timers capture the duration and rate of events.
-type Timer interface {
-	Metric
-
-	Count() int64
-	Max() int64
-	Mean() float64
-	Min() int64
-	Percentile(float64) float64
-	Percentiles([]float64) []float64
-	Rate1() float64
-	Rate5() float64
-	Rate15() float64
-	RateMean() float64
-	StdDev() float64
-	Sum() int64
-	Time(func())
-	Update(time.Duration)
-	UpdateSince(time.Time)
-	Variance() float64
-}
-
-// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
-func NewCustomTimer(meta *MetricMeta, h Histogram, m Meter) Timer {
-	if UseNilMetrics {
-		return NilTimer{}
-	}
-	return &StandardTimer{
-		MetricMeta: meta,
-		histogram:  h,
-		meter:      m,
-	}
-}
-
-// NewTimer constructs a new StandardTimer using an exponentially-decaying
-// sample with the same reservoir size and alpha as UNIX load averages.
-func NewTimer(meta *MetricMeta) Timer {
-	if UseNilMetrics {
-		return NilTimer{}
-	}
-	return &StandardTimer{
-		MetricMeta: meta,
-		histogram:  NewHistogram(meta, NewExpDecaySample(1028, 0.015)),
-		meter:      NewMeter(meta),
-	}
-}
-
-func RegTimer(name string, tagStrings ...string) Timer {
-	tr := NewTimer(NewMetricMeta(name, tagStrings))
-	MetricStats.Register(tr)
-	return tr
-}
-
-// NilTimer is a no-op Timer.
-type NilTimer struct {
-	*MetricMeta
-	h Histogram
-	m Meter
-}
-
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
-	return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (n NilTimer) Snapshot() Metric { return n }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
-
-// StandardTimer is the standard implementation of a Timer and uses a Histogram
-// and Meter.
-type StandardTimer struct {
-	*MetricMeta
-	histogram Histogram
-	meter     Meter
-	mutex     sync.Mutex
-}
-
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
-	return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
-	return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
-	return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
-	return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
-	return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
-	return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
-	return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
-	return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
-	return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
-	return t.meter.RateMean()
-}
-
-// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Metric {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	return &TimerSnapshot{
-		MetricMeta: t.MetricMeta,
-		histogram:  t.histogram.Snapshot().(*HistogramSnapshot),
-		meter:      t.meter.Snapshot().(*MeterSnapshot),
-	}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
-	return t.histogram.StdDev()
-}
-
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
-	return t.histogram.Sum()
-}
-
-// Record the duration of the execution of the given function.
-func (t *StandardTimer) Time(f func()) {
-	ts := time.Now()
-	f()
-	t.Update(time.Since(ts))
-}
-
-// Record the duration of an event.
-func (t *StandardTimer) Update(d time.Duration) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.histogram.Update(int64(d))
-	t.meter.Mark(1)
-}
-
-// Record the duration of an event that started at a time and ends now.
-func (t *StandardTimer) UpdateSince(ts time.Time) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	sinceMs := time.Since(ts) / time.Millisecond
-	t.histogram.Update(int64(sinceMs))
-	t.meter.Mark(1)
-}
-
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
-	return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
-	*MetricMeta
-	histogram *HistogramSnapshot
-	meter     *MeterSnapshot
-}
-
-// Count returns the number of events recorded at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
-
-// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
-
-// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
-
-// Percentile returns an arbitrary percentile of sampled values at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
-	return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of sampled values at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
-	return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Metric { return t }
-
-// StdDev returns the standard deviation of the values at the time the snapshot
-// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
-	panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
-	panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
-	panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }

+ 3 - 3
pkg/middleware/logger.go

@@ -19,8 +19,8 @@ import (
 	"net/http"
 	"net/http"
 	"time"
 	"time"
 
 
-	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/setting"
+	"github.com/prometheus/client_golang/prometheus"
 	"gopkg.in/macaron.v1"
 	"gopkg.in/macaron.v1"
 )
 )
 
 
@@ -35,8 +35,8 @@ func Logger() macaron.Handler {
 		timeTakenMs := time.Since(start) / time.Millisecond
 		timeTakenMs := time.Since(start) / time.Millisecond
 
 
 		if timer, ok := c.Data["perfmon.timer"]; ok {
 		if timer, ok := c.Data["perfmon.timer"]; ok {
-			timerTyped := timer.(metrics.Timer)
-			timerTyped.Update(timeTakenMs)
+			timerTyped := timer.(prometheus.Summary)
+			timerTyped.Observe(float64(timeTakenMs))
 		}
 		}
 
 
 		status := rw.Status()
 		status := rw.Status()

+ 2 - 2
pkg/middleware/middleware.go

@@ -10,10 +10,10 @@ import (
 	"github.com/grafana/grafana/pkg/components/apikeygen"
 	"github.com/grafana/grafana/pkg/components/apikeygen"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
 	l "github.com/grafana/grafana/pkg/login"
 	l "github.com/grafana/grafana/pkg/login"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/util"
 	"github.com/grafana/grafana/pkg/util"
+	"github.com/prometheus/client_golang/prometheus"
 )
 )
 
 
 type Context struct {
 type Context struct {
@@ -251,7 +251,7 @@ func (ctx *Context) HasHelpFlag(flag m.HelpFlags1) bool {
 	return ctx.HelpFlags1.HasFlag(flag)
 	return ctx.HelpFlags1.HasFlag(flag)
 }
 }
 
 
-func (ctx *Context) TimeRequest(timer metrics.Timer) {
+func (ctx *Context) TimeRequest(timer prometheus.Summary) {
 	ctx.Data["perfmon.timer"] = timer
 	ctx.Data["perfmon.timer"] = timer
 }
 }
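
Taken together with the Logger() change above, TimeRequest now stores a prometheus.Summary instead of the old internal Timer. A hedged sketch of how such a summary could be registered and observed; the metric name here is illustrative, not necessarily the one Grafana registers:

package middlewareexample

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// pageLoadTimer stands in for the summary handed to ctx.TimeRequest().
var pageLoadTimer = prometheus.NewSummary(prometheus.SummaryOpts{
	Namespace: "grafana",
	Name:      "page_load_duration_milliseconds", // illustrative name
	Help:      "Summary of page load durations in milliseconds.",
})

func init() {
	prometheus.MustRegister(pageLoadTimer)
}

// observeRequest records a request duration the same way Logger() does:
// timerTyped.Observe(float64(timeTakenMs)).
func observeRequest(start time.Time) {
	timeTakenMs := time.Since(start) / time.Millisecond
	pageLoadTimer.Observe(float64(timeTakenMs))
}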
 
 

+ 149 - 13
pkg/middleware/request_metrics.go

@@ -2,19 +2,28 @@ package middleware
 
 
 import (
 import (
 	"net/http"
 	"net/http"
+	"strconv"
 	"strings"
 	"strings"
+	"time"
 
 
 	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/metrics"
 	"gopkg.in/macaron.v1"
 	"gopkg.in/macaron.v1"
 )
 )
 
 
-func RequestMetrics() macaron.Handler {
+func RequestMetrics(handler string) macaron.Handler {
 	return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
 	return func(res http.ResponseWriter, req *http.Request, c *macaron.Context) {
 		rw := res.(macaron.ResponseWriter)
 		rw := res.(macaron.ResponseWriter)
+		now := time.Now()
 		c.Next()
 		c.Next()
 
 
 		status := rw.Status()
 		status := rw.Status()
 
 
+		code := sanitizeCode(status)
+		method := sanitizeMethod(req.Method)
+		metrics.M_Http_Request_Total.WithLabelValues(handler, code, method).Inc()
+		duration := time.Since(now).Nanoseconds() / int64(time.Millisecond)
+		metrics.M_Http_Request_Summary.WithLabelValues(handler, code, method).Observe(float64(duration))
+
 		if strings.HasPrefix(req.RequestURI, "/api/datasources/proxy") {
 		if strings.HasPrefix(req.RequestURI, "/api/datasources/proxy") {
 			countProxyRequests(status)
 			countProxyRequests(status)
 		} else if strings.HasPrefix(req.RequestURI, "/api/") {
 		} else if strings.HasPrefix(req.RequestURI, "/api/") {
@@ -28,38 +37,165 @@ func RequestMetrics() macaron.Handler {
 func countApiRequests(status int) {
 func countApiRequests(status int) {
 	switch status {
 	switch status {
 	case 200:
 	case 200:
-		metrics.M_Api_Status_200.Inc(1)
+		metrics.M_Api_Status.WithLabelValues("200").Inc()
 	case 404:
 	case 404:
-		metrics.M_Api_Status_404.Inc(1)
+		metrics.M_Api_Status.WithLabelValues("404").Inc()
 	case 500:
 	case 500:
-		metrics.M_Api_Status_500.Inc(1)
+		metrics.M_Api_Status.WithLabelValues("500").Inc()
 	default:
 	default:
-		metrics.M_Api_Status_Unknown.Inc(1)
+		metrics.M_Api_Status.WithLabelValues("unknown").Inc()
 	}
 	}
 }
 }
 
 
 func countPageRequests(status int) {
 func countPageRequests(status int) {
 	switch status {
 	switch status {
 	case 200:
 	case 200:
-		metrics.M_Page_Status_200.Inc(1)
+		metrics.M_Page_Status.WithLabelValues("200").Inc()
 	case 404:
 	case 404:
-		metrics.M_Page_Status_404.Inc(1)
+		metrics.M_Page_Status.WithLabelValues("404").Inc()
 	case 500:
 	case 500:
-		metrics.M_Page_Status_500.Inc(1)
+		metrics.M_Page_Status.WithLabelValues("500").Inc()
 	default:
 	default:
-		metrics.M_Page_Status_Unknown.Inc(1)
+		metrics.M_Page_Status.WithLabelValues("unknown").Inc()
 	}
 	}
 }
 }
 
 
 func countProxyRequests(status int) {
 func countProxyRequests(status int) {
 	switch status {
 	switch status {
 	case 200:
 	case 200:
-		metrics.M_Proxy_Status_200.Inc(1)
+		metrics.M_Proxy_Status.WithLabelValues("200").Inc()
 	case 404:
 	case 404:
-		metrics.M_Proxy_Status_404.Inc(1)
+		metrics.M_Proxy_Status.WithLabelValues("404").Inc()
 	case 500:
 	case 500:
-		metrics.M_Proxy_Status_500.Inc(1)
+		metrics.M_Proxy_Status.WithLabelValues("500").Inc()
+	default:
+		metrics.M_Proxy_Status.WithLabelValues("unknown").Inc()
+	}
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+	switch s {
+	case 100:
+		return "100"
+	case 101:
+		return "101"
+
+	case 200, 0:
+		return "200"
+	case 201:
+		return "201"
+	case 202:
+		return "202"
+	case 203:
+		return "203"
+	case 204:
+		return "204"
+	case 205:
+		return "205"
+	case 206:
+		return "206"
+
+	case 300:
+		return "300"
+	case 301:
+		return "301"
+	case 302:
+		return "302"
+	case 304:
+		return "304"
+	case 305:
+		return "305"
+	case 307:
+		return "307"
+
+	case 400:
+		return "400"
+	case 401:
+		return "401"
+	case 402:
+		return "402"
+	case 403:
+		return "403"
+	case 404:
+		return "404"
+	case 405:
+		return "405"
+	case 406:
+		return "406"
+	case 407:
+		return "407"
+	case 408:
+		return "408"
+	case 409:
+		return "409"
+	case 410:
+		return "410"
+	case 411:
+		return "411"
+	case 412:
+		return "412"
+	case 413:
+		return "413"
+	case 414:
+		return "414"
+	case 415:
+		return "415"
+	case 416:
+		return "416"
+	case 417:
+		return "417"
+	case 418:
+		return "418"
+
+	case 500:
+		return "500"
+	case 501:
+		return "501"
+	case 502:
+		return "502"
+	case 503:
+		return "503"
+	case 504:
+		return "504"
+	case 505:
+		return "505"
+
+	case 428:
+		return "428"
+	case 429:
+		return "429"
+	case 431:
+		return "431"
+	case 511:
+		return "511"
+
 	default:
 	default:
-		metrics.M_Proxy_Status_Unknown.Inc(1)
+		return strconv.Itoa(s)
 	}
 	}
 }
 }
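
The two metrics referenced above, metrics.M_Http_Request_Total and metrics.M_Http_Request_Summary, are label-based Prometheus vectors. A hedged sketch of how they could be declared; only the label order (handler, status code, method) is taken from the WithLabelValues calls, while the metric and label names below are assumptions:

package metricsexample

import "github.com/prometheus/client_golang/prometheus"

var (
	// Counter of requests, partitioned the same way RequestMetrics() labels them.
	httpRequestTotal = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "grafana",
		Name:      "http_request_total", // assumed name
		Help:      "HTTP request counts.",
	}, []string{"handler", "statuscode", "method"}) // assumed label names

	// Summary of request durations in milliseconds, with the same labels.
	httpRequestSummary = prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Namespace: "grafana",
		Name:      "http_request_duration_milliseconds", // assumed name
		Help:      "HTTP request duration summary in milliseconds.",
	}, []string{"handler", "statuscode", "method"})
)

func init() {
	prometheus.MustRegister(httpRequestTotal, httpRequestSummary)
}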

+ 25 - 13
pkg/models/datasource.go

@@ -54,19 +54,31 @@ type DataSource struct {
 }
 }
 
 
 var knownDatasourcePlugins map[string]bool = map[string]bool{
 var knownDatasourcePlugins map[string]bool = map[string]bool{
-	DS_ES:          true,
-	DS_GRAPHITE:    true,
-	DS_INFLUXDB:    true,
-	DS_INFLUXDB_08: true,
-	DS_KAIROSDB:    true,
-	DS_CLOUDWATCH:  true,
-	DS_PROMETHEUS:  true,
-	DS_OPENTSDB:    true,
-	"opennms":      true,
-	"druid":        true,
-	"dalmatinerdb": true,
-	"gnocci":       true,
-	"zabbix":       true,
+	DS_ES:                                 true,
+	DS_GRAPHITE:                           true,
+	DS_INFLUXDB:                           true,
+	DS_INFLUXDB_08:                        true,
+	DS_KAIROSDB:                           true,
+	DS_CLOUDWATCH:                         true,
+	DS_PROMETHEUS:                         true,
+	DS_OPENTSDB:                           true,
+	"opennms":                             true,
+	"druid":                               true,
+	"dalmatinerdb":                        true,
+	"gnocci":                              true,
+	"zabbix":                              true,
+	"newrelic-app":                        true,
+	"grafana-datadog-datasource":          true,
+	"grafana-simple-json":                 true,
+	"grafana-splunk-datasource":           true,
+	"udoprog-heroic-datasource":           true,
+	"grafana-openfalcon-datasource":       true,
+	"opennms-datasource":                  true,
+	"rackerlabs-blueflood-datasource":     true,
+	"crate-datasource":                    true,
+	"ayoungprogrammer-finance-datasource": true,
+	"monasca-datasource":                  true,
+	"vertamedia-clickhouse-datasource":    true,
 }
 }
 
 
 func IsKnownDataSourcePlugin(dsType string) bool {
 func IsKnownDataSourcePlugin(dsType string) bool {

+ 2 - 2
pkg/services/alerting/eval_handler.go

@@ -63,8 +63,8 @@ func (e *DefaultEvalHandler) Eval(context *EvalContext) {
 	context.EndTime = time.Now()
 	context.EndTime = time.Now()
 	context.Rule.State = e.getNewState(context)
 	context.Rule.State = e.getNewState(context)
 
 
-	elapsedTime := context.EndTime.Sub(context.StartTime) / time.Millisecond
-	metrics.M_Alerting_Execution_Time.Update(elapsedTime)
+	elapsedTime := context.EndTime.Sub(context.StartTime).Nanoseconds() / int64(time.Millisecond)
+	metrics.M_Alerting_Execution_Time.Observe(float64(elapsedTime))
 }
 }
 
 
 // This should be move into evalContext once its been refactored.
 // This should be move into evalContext once its been refactored.

+ 4 - 1
pkg/services/alerting/notifier.go

@@ -10,6 +10,8 @@ import (
 	"github.com/grafana/grafana/pkg/components/imguploader"
 	"github.com/grafana/grafana/pkg/components/imguploader"
 	"github.com/grafana/grafana/pkg/components/renderer"
 	"github.com/grafana/grafana/pkg/components/renderer"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
+	"github.com/grafana/grafana/pkg/metrics"
+
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 )
 )
 
 
@@ -66,6 +68,7 @@ func (n *notificationService) sendNotifications(context *EvalContext, notifiers
 	for _, notifier := range notifiers {
 	for _, notifier := range notifiers {
 		not := notifier //avoid updating scope variable in go routine
 		not := notifier //avoid updating scope variable in go routine
 		n.log.Info("Sending notification", "type", not.GetType(), "id", not.GetNotifierId(), "isDefault", not.GetIsDefault())
 		n.log.Info("Sending notification", "type", not.GetType(), "id", not.GetNotifierId(), "isDefault", not.GetIsDefault())
+		metrics.M_Alerting_Notification_Sent.WithLabelValues(not.GetType()).Inc()
 		g.Go(func() error { return not.Notify(context) })
 		g.Go(func() error { return not.Notify(context) })
 	}
 	}
 
 
@@ -97,7 +100,7 @@ func (n *notificationService) uploadImage(context *EvalContext) (err error) {
 		context.ImageOnDiskPath = imagePath
 		context.ImageOnDiskPath = imagePath
 	}
 	}
 
 
-	context.ImagePublicUrl, err = uploader.Upload(context.ImageOnDiskPath)
+	context.ImagePublicUrl, err = uploader.Upload(context.Ctx, context.ImageOnDiskPath)
 	if err != nil {
 	if err != nil {
 		return err
 		return err
 	}
 	}
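
The metrics.M_Alerting_Notification_Sent.WithLabelValues(not.GetType()).Inc() call above replaces the per-channel counters that the notifier files below stop incrementing. A hedged sketch of the consolidated counter (metric and label names are assumptions):

package metricsexample

import "github.com/prometheus/client_golang/prometheus"

// One counter with a "type" label replaces the old
// M_Alerting_Notification_Sent_Slack / _Email / _PagerDuty / ... counters.
var alertingNotificationSent = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: "grafana",
	Name:      "alerting_notification_sent_total", // assumed name
	Help:      "Counter of sent alert notifications, partitioned by notifier type.",
}, []string{"type"}) // assumed label name

func init() {
	prometheus.MustRegister(alertingNotificationSent)
}

// Usage mirroring sendNotifications above:
//   alertingNotificationSent.WithLabelValues(notifier.GetType()).Inc()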

+ 0 - 2
pkg/services/alerting/notifiers/dingding.go

@@ -4,7 +4,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -47,7 +46,6 @@ type DingDingNotifier struct {
 
 
 func (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *DingDingNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Sending dingding")
 	this.log.Info("Sending dingding")
-	metrics.M_Alerting_Notification_Sent_DingDing.Inc(1)
 
 
 	messageUrl, err := evalContext.GetRuleUrl()
 	messageUrl, err := evalContext.GetRuleUrl()
 	if err != nil {
 	if err != nil {

+ 0 - 2
pkg/services/alerting/notifiers/email.go

@@ -6,7 +6,6 @@ import (
 
 
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/setting"
@@ -61,7 +60,6 @@ func NewEmailNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
 
 
 func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *EmailNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Sending alert notification to", "addresses", this.Addresses)
 	this.log.Info("Sending alert notification to", "addresses", this.Addresses)
-	metrics.M_Alerting_Notification_Sent_Email.Inc(1)
 
 
 	ruleUrl, err := evalContext.GetRuleUrl()
 	ruleUrl, err := evalContext.GetRuleUrl()
 	if err != nil {
 	if err != nil {

+ 2 - 3
pkg/services/alerting/notifiers/line.go

@@ -2,12 +2,12 @@ package notifiers
 
 
 import (
 import (
 	"fmt"
 	"fmt"
+	"net/url"
+
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
-	"net/url"
 )
 )
 
 
 func init() {
 func init() {
@@ -53,7 +53,6 @@ type LineNotifier struct {
 
 
 func (this *LineNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *LineNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Executing line notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
 	this.log.Info("Executing line notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
-	metrics.M_Alerting_Notification_Sent_LINE.Inc(1)
 
 
 	var err error
 	var err error
 	switch evalContext.Rule.State {
 	switch evalContext.Rule.State {

+ 0 - 2
pkg/services/alerting/notifiers/opsgenie.go

@@ -7,7 +7,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -65,7 +64,6 @@ type OpsGenieNotifier struct {
 }
 }
 
 
 func (this *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *OpsGenieNotifier) Notify(evalContext *alerting.EvalContext) error {
-	metrics.M_Alerting_Notification_Sent_OpsGenie.Inc(1)
 
 
 	var err error
 	var err error
 	switch evalContext.Rule.State {
 	switch evalContext.Rule.State {

+ 0 - 2
pkg/services/alerting/notifiers/pagerduty.go

@@ -6,7 +6,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -63,7 +62,6 @@ type PagerdutyNotifier struct {
 }
 }
 
 
 func (this *PagerdutyNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *PagerdutyNotifier) Notify(evalContext *alerting.EvalContext) error {
-	metrics.M_Alerting_Notification_Sent_PagerDuty.Inc(1)
 
 
 	if evalContext.Rule.State == m.AlertStateOK && !this.AutoResolve {
 	if evalContext.Rule.State == m.AlertStateOK && !this.AutoResolve {
 		this.log.Info("Not sending a trigger to Pagerduty", "state", evalContext.Rule.State, "auto resolve", this.AutoResolve)
 		this.log.Info("Not sending a trigger to Pagerduty", "state", evalContext.Rule.State, "auto resolve", this.AutoResolve)

+ 0 - 2
pkg/services/alerting/notifiers/pushover.go

@@ -7,7 +7,6 @@ import (
 
 
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -125,7 +124,6 @@ type PushoverNotifier struct {
 }
 }
 
 
 func (this *PushoverNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *PushoverNotifier) Notify(evalContext *alerting.EvalContext) error {
-	metrics.M_Alerting_Notification_Sent_Pushover.Inc(1)
 	ruleUrl, err := evalContext.GetRuleUrl()
 	ruleUrl, err := evalContext.GetRuleUrl()
 	if err != nil {
 	if err != nil {
 		this.log.Error("Failed get rule link", "error", err)
 		this.log.Error("Failed get rule link", "error", err)

+ 0 - 2
pkg/services/alerting/notifiers/sensu.go

@@ -7,7 +7,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -74,7 +73,6 @@ type SensuNotifier struct {
 
 
 func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *SensuNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Sending sensu result")
 	this.log.Info("Sending sensu result")
-	metrics.M_Alerting_Notification_Sent_Sensu.Inc(1)
 
 
 	bodyJSON := simplejson.New()
 	bodyJSON := simplejson.New()
 	bodyJSON.Set("ruleId", evalContext.Rule.Id)
 	bodyJSON.Set("ruleId", evalContext.Rule.Id)

+ 0 - 2
pkg/services/alerting/notifiers/slack.go

@@ -6,7 +6,6 @@ import (
 
 
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/setting"
@@ -79,7 +78,6 @@ type SlackNotifier struct {
 
 
 func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *SlackNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Executing slack notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
 	this.log.Info("Executing slack notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
-	metrics.M_Alerting_Notification_Sent_Slack.Inc(1)
 
 
 	ruleUrl, err := evalContext.GetRuleUrl()
 	ruleUrl, err := evalContext.GetRuleUrl()
 	if err != nil {
 	if err != nil {

+ 0 - 2
pkg/services/alerting/notifiers/telegram.go

@@ -6,7 +6,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -80,7 +79,6 @@ func NewTelegramNotifier(model *m.AlertNotification) (alerting.Notifier, error)
 func (this *TelegramNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *TelegramNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Sending alert notification to", "bot_token", this.BotToken)
 	this.log.Info("Sending alert notification to", "bot_token", this.BotToken)
 	this.log.Info("Sending alert notification to", "chat_id", this.ChatID)
 	this.log.Info("Sending alert notification to", "chat_id", this.ChatID)
-	metrics.M_Alerting_Notification_Sent_Telegram.Inc(1)
 
 
 	bodyJSON := simplejson.New()
 	bodyJSON := simplejson.New()
 
 

+ 0 - 2
pkg/services/alerting/notifiers/threema.go

@@ -7,7 +7,6 @@ import (
 
 
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -118,7 +117,6 @@ func NewThreemaNotifier(model *m.AlertNotification) (alerting.Notifier, error) {
 func (notifier *ThreemaNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (notifier *ThreemaNotifier) Notify(evalContext *alerting.EvalContext) error {
 	notifier.log.Info("Sending alert notification from", "threema_id", notifier.GatewayID)
 	notifier.log.Info("Sending alert notification from", "threema_id", notifier.GatewayID)
 	notifier.log.Info("Sending alert notification to", "threema_id", notifier.RecipientID)
 	notifier.log.Info("Sending alert notification to", "threema_id", notifier.RecipientID)
-	metrics.M_Alerting_Notification_Sent_Threema.Inc(1)
 
 
 	// Set up basic API request data
 	// Set up basic API request data
 	data := url.Values{}
 	data := url.Values{}

+ 0 - 2
pkg/services/alerting/notifiers/victorops.go

@@ -6,7 +6,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	"github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/setting"
 	"github.com/grafana/grafana/pkg/setting"
@@ -72,7 +71,6 @@ type VictoropsNotifier struct {
 // Notify sends notification to Victorops via POST to URL endpoint
 // Notify sends notification to Victorops via POST to URL endpoint
 func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *VictoropsNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Executing victorops notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
 	this.log.Info("Executing victorops notification", "ruleId", evalContext.Rule.Id, "notification", this.Name)
-	metrics.M_Alerting_Notification_Sent_Victorops.Inc(1)
 
 
 	ruleUrl, err := evalContext.GetRuleUrl()
 	ruleUrl, err := evalContext.GetRuleUrl()
 	if err != nil {
 	if err != nil {

+ 0 - 2
pkg/services/alerting/notifiers/webhook.go

@@ -4,7 +4,6 @@ import (
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/bus"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/components/simplejson"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
-	"github.com/grafana/grafana/pkg/metrics"
 	m "github.com/grafana/grafana/pkg/models"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/services/alerting"
 	"github.com/grafana/grafana/pkg/services/alerting"
 )
 )
@@ -68,7 +67,6 @@ type WebhookNotifier struct {
 
 
 func (this *WebhookNotifier) Notify(evalContext *alerting.EvalContext) error {
 func (this *WebhookNotifier) Notify(evalContext *alerting.EvalContext) error {
 	this.log.Info("Sending webhook")
 	this.log.Info("Sending webhook")
-	metrics.M_Alerting_Notification_Sent_Webhook.Inc(1)
 
 
 	bodyJSON := simplejson.New()
 	bodyJSON := simplejson.New()
 	bodyJSON.Set("title", evalContext.GetNotificationTitle())
 	bodyJSON.Set("title", evalContext.GetNotificationTitle())

+ 1 - 1
pkg/services/alerting/reader.go

@@ -59,7 +59,7 @@ func (arr *DefaultRuleReader) Fetch() []*Rule {
 		}
 		}
 	}
 	}
 
 
-	metrics.M_Alerting_Active_Alerts.Update(int64(len(res)))
+	metrics.M_Alerting_Active_Alerts.Set(float64(len(res)))
 	return res
 	return res
 }
 }
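
metrics.M_Alerting_Active_Alerts.Set(float64(len(res))) above moves the active-alert count from the old Update(int64) API to a Prometheus gauge. A hedged sketch of such a gauge (the metric name is an assumption):

package metricsexample

import "github.com/prometheus/client_golang/prometheus"

var activeAlerts = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "grafana",
	Name:      "alerting_active_alerts", // assumed name
	Help:      "Number of alert rules currently scheduled for evaluation.",
})

func init() {
	prometheus.MustRegister(activeAlerts)
}

// Usage mirroring Fetch() above: activeAlerts.Set(float64(len(res)))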
 
 

+ 1 - 16
pkg/services/alerting/result_handler.go

@@ -42,7 +42,7 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
 		annotationData.Set("noData", true)
 		annotationData.Set("noData", true)
 	}
 	}
 
 
-	countStateResult(evalContext.Rule.State)
+	metrics.M_Alerting_Result_State.WithLabelValues(string(evalContext.Rule.State)).Inc()
 	if evalContext.ShouldUpdateAlertState() {
 	if evalContext.ShouldUpdateAlertState() {
 		handler.log.Info("New state change", "alertId", evalContext.Rule.Id, "newState", evalContext.Rule.State, "prev state", evalContext.PrevAlertState)
 		handler.log.Info("New state change", "alertId", evalContext.Rule.Id, "newState", evalContext.Rule.State, "prev state", evalContext.PrevAlertState)
 
 
@@ -95,18 +95,3 @@ func (handler *DefaultResultHandler) Handle(evalContext *EvalContext) error {
 
 
 	return nil
 	return nil
 }
 }
-
-func countStateResult(state m.AlertStateType) {
-	switch state {
-	case m.AlertStatePending:
-		metrics.M_Alerting_Result_State_Pending.Inc(1)
-	case m.AlertStateAlerting:
-		metrics.M_Alerting_Result_State_Alerting.Inc(1)
-	case m.AlertStateOK:
-		metrics.M_Alerting_Result_State_Ok.Inc(1)
-	case m.AlertStatePaused:
-		metrics.M_Alerting_Result_State_Paused.Inc(1)
-	case m.AlertStateNoData:
-		metrics.M_Alerting_Result_State_NoData.Inc(1)
-	}
-}

+ 1 - 1
pkg/services/sqlstore/dashboard.go

@@ -75,7 +75,7 @@ func SaveDashboard(cmd *m.SaveDashboardCommand) error {
 
 
 		if dash.Id == 0 {
 		if dash.Id == 0 {
 			dash.Version = 1
 			dash.Version = 1
-			metrics.M_Models_Dashboard_Insert.Inc(1)
+			metrics.M_Api_Dashboard_Insert.Inc()
 			dash.Data.Set("version", dash.Version)
 			dash.Data.Set("version", dash.Version)
 			affectedRows, err = sess.Insert(dash)
 			affectedRows, err = sess.Insert(dash)
 		} else {
 		} else {

+ 1 - 1
pkg/services/sqlstore/datasource.go

@@ -20,7 +20,7 @@ func init() {
 }
 }
 
 
 func GetDataSourceById(query *m.GetDataSourceByIdQuery) error {
 func GetDataSourceById(query *m.GetDataSourceByIdQuery) error {
-	metrics.M_DB_DataSource_QueryById.Inc(1)
+	metrics.M_DB_DataSource_QueryById.Inc()
 
 
 	datasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}
 	datasource := m.DataSource{OrgId: query.OrgId, Id: query.Id}
 	has, err := x.Get(&datasource)
 	has, err := x.Get(&datasource)

+ 17 - 14
pkg/tsdb/prometheus/prometheus.go

@@ -13,8 +13,11 @@ import (
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/tsdb"
 	"github.com/grafana/grafana/pkg/tsdb"
-	"github.com/prometheus/client_golang/api/prometheus"
-	pmodel "github.com/prometheus/common/model"
+	api "github.com/prometheus/client_golang/api"
+	apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
+	"github.com/prometheus/common/model"
 )
 )
 
 
 type PrometheusExecutor struct {
 type PrometheusExecutor struct {
@@ -57,26 +60,26 @@ func init() {
 	legendFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
 	legendFormat = regexp.MustCompile(`\{\{\s*(.+?)\s*\}\}`)
 }
 }
 
 
-func (e *PrometheusExecutor) getClient() (prometheus.QueryAPI, error) {
-	cfg := prometheus.Config{
-		Address:   e.DataSource.Url,
-		Transport: e.Transport,
+func (e *PrometheusExecutor) getClient() (apiv1.API, error) {
+	cfg := api.Config{
+		Address:      e.DataSource.Url,
+		RoundTripper: e.Transport,
 	}
 	}
 
 
 	if e.BasicAuth {
 	if e.BasicAuth {
-		cfg.Transport = basicAuthTransport{
+		cfg.RoundTripper = basicAuthTransport{
 			Transport: e.Transport,
 			Transport: e.Transport,
 			username:  e.BasicAuthUser,
 			username:  e.BasicAuthUser,
 			password:  e.BasicAuthPassword,
 			password:  e.BasicAuthPassword,
 		}
 		}
 	}
 	}
 
 
-	client, err := prometheus.New(cfg)
+	client, err := api.NewClient(cfg)
 	if err != nil {
 	if err != nil {
 		return nil, err
 		return nil, err
 	}
 	}
 
 
-	return prometheus.NewQueryAPI(client), nil
+	return apiv1.NewAPI(client), nil
 }
 }
 
 
 func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) *tsdb.BatchResult {
 func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) *tsdb.BatchResult {
@@ -92,7 +95,7 @@ func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlic
 		return result.WithError(err)
 		return result.WithError(err)
 	}
 	}
 
 
-	timeRange := prometheus.Range{
+	timeRange := apiv1.Range{
 		Start: query.Start,
 		Start: query.Start,
 		End:   query.End,
 		End:   query.End,
 		Step:  query.Step,
 		Step:  query.Step,
@@ -112,7 +115,7 @@ func (e *PrometheusExecutor) Execute(ctx context.Context, queries tsdb.QuerySlic
 	return result
 	return result
 }
 }
 
 
-func formatLegend(metric pmodel.Metric, query *PrometheusQuery) string {
+func formatLegend(metric model.Metric, query *PrometheusQuery) string {
 	if query.LegendFormat == "" {
 	if query.LegendFormat == "" {
 		return metric.String()
 		return metric.String()
 	}
 	}
@@ -121,7 +124,7 @@ func formatLegend(metric pmodel.Metric, query *PrometheusQuery) string {
 		labelName := strings.Replace(string(in), "{{", "", 1)
 		labelName := strings.Replace(string(in), "{{", "", 1)
 		labelName = strings.Replace(labelName, "}}", "", 1)
 		labelName = strings.Replace(labelName, "}}", "", 1)
 		labelName = strings.TrimSpace(labelName)
 		labelName = strings.TrimSpace(labelName)
-		if val, exists := metric[pmodel.LabelName(labelName)]; exists {
+		if val, exists := metric[model.LabelName(labelName)]; exists {
 			return []byte(val)
 			return []byte(val)
 		}
 		}
 
 
@@ -165,11 +168,11 @@ func parseQuery(queries tsdb.QuerySlice, queryContext *tsdb.QueryContext) (*Prom
 	}, nil
 	}, nil
 }
 }
 
 
-func parseResponse(value pmodel.Value, query *PrometheusQuery) (map[string]*tsdb.QueryResult, error) {
+func parseResponse(value model.Value, query *PrometheusQuery) (map[string]*tsdb.QueryResult, error) {
 	queryResults := make(map[string]*tsdb.QueryResult)
 	queryResults := make(map[string]*tsdb.QueryResult)
 	queryRes := tsdb.NewQueryResult()
 	queryRes := tsdb.NewQueryResult()
 
 
-	data, ok := value.(pmodel.Matrix)
+	data, ok := value.(model.Matrix)
 	if !ok {
 	if !ok {
 		return queryResults, fmt.Errorf("Unsupported result format: %s", value.Type().String())
 		return queryResults, fmt.Errorf("Unsupported result format: %s", value.Type().String())
 	}
 	}
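
getClient() above moves from the deprecated prometheus.New/NewQueryAPI pair to the split client packages. A minimal sketch of the new wiring, assuming only the constructors shown in the diff (the address is illustrative):

package main

import (
	"fmt"

	api "github.com/prometheus/client_golang/api"
	apiv1 "github.com/prometheus/client_golang/api/prometheus/v1"
)

func main() {
	// api.NewClient builds the low-level HTTP client ...
	client, err := api.NewClient(api.Config{Address: "http://localhost:9090"})
	if err != nil {
		fmt.Println("failed to create Prometheus client:", err)
		return
	}

	// ... and apiv1.NewAPI wraps it with the query methods (QueryRange etc.)
	// that Execute() above uses.
	queryAPI := apiv1.NewAPI(client)
	_ = queryAPI
}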

+ 7 - 0
public/app/core/utils/kbn.js

@@ -389,12 +389,17 @@ function($, _) {
     return value.toExponential(decimals);
     return value.toExponential(decimals);
   };
   };
 
 
+  kbn.valueFormats.locale = function(value, decimals) {
+    return value.toLocaleString(undefined, {maximumFractionDigits: decimals});
+  };
+
   // Currencies
   // Currencies
   kbn.valueFormats.currencyUSD = kbn.formatBuilders.currency('$');
   kbn.valueFormats.currencyUSD = kbn.formatBuilders.currency('$');
   kbn.valueFormats.currencyGBP = kbn.formatBuilders.currency('£');
   kbn.valueFormats.currencyGBP = kbn.formatBuilders.currency('£');
   kbn.valueFormats.currencyEUR = kbn.formatBuilders.currency('€');
   kbn.valueFormats.currencyEUR = kbn.formatBuilders.currency('€');
   kbn.valueFormats.currencyJPY = kbn.formatBuilders.currency('¥');
   kbn.valueFormats.currencyJPY = kbn.formatBuilders.currency('¥');
   kbn.valueFormats.currencyRUB = kbn.formatBuilders.currency('₽');
   kbn.valueFormats.currencyRUB = kbn.formatBuilders.currency('₽');
+  kbn.valueFormats.currencyUAH = kbn.formatBuilders.currency('₴');
 
 
   // Data (Binary)
   // Data (Binary)
   kbn.valueFormats.bits   = kbn.formatBuilders.binarySIPrefix('b');
   kbn.valueFormats.bits   = kbn.formatBuilders.binarySIPrefix('b');
@@ -708,6 +713,7 @@ function($, _) {
           {text: 'hexadecimal (0x)',    value: 'hex0x'      },
           {text: 'hexadecimal (0x)',    value: 'hex0x'      },
           {text: 'hexadecimal',         value: 'hex'        },
           {text: 'hexadecimal',         value: 'hex'        },
           {text: 'scientific notation', value: 'sci'        },
           {text: 'scientific notation', value: 'sci'        },
+          {text: 'locale format',       value: 'locale'     },
         ]
         ]
       },
       },
       {
       {
@@ -718,6 +724,7 @@ function($, _) {
           {text: 'Euro (€)',    value: 'currencyEUR'},
           {text: 'Euro (€)',    value: 'currencyEUR'},
           {text: 'Yen (¥)',     value: 'currencyJPY'},
           {text: 'Yen (¥)',     value: 'currencyJPY'},
           {text: 'Rubles (₽)',  value: 'currencyRUB'},
           {text: 'Rubles (₽)',  value: 'currencyRUB'},
+          {text: 'Hryvnias (₴)',  value: 'currencyUAH'},
         ]
         ]
       },
       },
       {
       {

+ 3 - 0
public/app/features/dashboard/model.ts

@@ -238,6 +238,9 @@ export class DashboardModel {
     delete newPanel.repeatIteration;
     delete newPanel.repeatIteration;
     delete newPanel.repeatPanelId;
     delete newPanel.repeatPanelId;
     delete newPanel.scopedVars;
     delete newPanel.scopedVars;
+    if (newPanel.alert) {
+      delete newPanel.thresholds;
+    }
     delete newPanel.alert;
     delete newPanel.alert;
 
 
     row.addPanel(newPanel);
     row.addPanel(newPanel);

+ 0 - 1
public/app/plugins/datasource/graphite/query_ctrl.ts

@@ -184,7 +184,6 @@ export class GraphiteQueryCtrl extends QueryCtrl {
       altSegments.unshift(this.uiSegmentSrv.newSegment('*'));
       altSegments.unshift(this.uiSegmentSrv.newSegment('*'));
       return altSegments;
       return altSegments;
     }).catch(err => {
     }).catch(err => {
-      appEvents.emit('alert-error', ['Error', err]);
       return [];
       return [];
     });
     });
   }
   }

+ 106 - 0
public/app/plugins/datasource/mysql/mode-sql.js

@@ -0,0 +1,106 @@
+// jshint ignore: start
+// jscs: disable
+
+ace.define("ace/mode/sql_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"], function(require, exports, module) {
+"use strict";
+
+var oop = require("../lib/oop");
+var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
+
+var SqlHighlightRules = function() {
+
+    var keywords = (
+        "select|insert|update|delete|from|where|and|or|group|by|order|limit|offset|having|as|case|" +
+        "when|else|end|type|left|right|join|on|outer|desc|asc|union|create|table|primary|key|if|" +
+        "foreign|not|references|default|null|inner|cross|natural|database|drop|grant"
+    );
+
+    var builtinConstants = (
+        "true|false"
+    );
+
+    var builtinFunctions = (
+        "avg|count|first|last|max|min|sum|ucase|lcase|mid|len|round|rank|now|format|" +
+        "coalesce|ifnull|isnull|nvl"
+    );
+
+    var dataTypes = (
+        "int|numeric|decimal|date|varchar|char|bigint|float|double|bit|binary|text|set|timestamp|" +
+        "money|real|number|integer"
+    );
+
+    var keywordMapper = this.createKeywordMapper({
+        "support.function": builtinFunctions,
+        "keyword": keywords,
+        "constant.language": builtinConstants,
+        "storage.type": dataTypes
+    }, "identifier", true);
+
+    this.$rules = {
+        "start" : [ {
+            token : "comment",
+            regex : "--.*$"
+        },  {
+            token : "comment",
+            start : "/\\*",
+            end : "\\*/"
+        }, {
+            token : "string",           // " string
+            regex : '".*?"'
+        }, {
+            token : "string",           // ' string
+            regex : "'.*?'"
+        }, {
+            token : "string",           // ` string (apache drill)
+            regex : "`.*?`"
+        }, {
+            token : "constant.numeric", // float
+            regex : "[+-]?\\d+(?:(?:\\.\\d*)?(?:[eE][+-]?\\d+)?)?\\b"
+        }, {
+            token : keywordMapper,
+            regex : "[a-zA-Z_$][a-zA-Z0-9_$]*\\b"
+        }, {
+            token : "keyword.operator",
+            regex : "\\+|\\-|\\/|\\/\\/|%|<@>|@>|<@|&|\\^|~|<|>|<=|=>|==|!=|<>|="
+        }, {
+            token : "paren.lparen",
+            regex : "[\\(]"
+        }, {
+            token : "paren.rparen",
+            regex : "[\\)]"
+        }, {
+            token : "text",
+            regex : "\\s+"
+        } ]
+    };
+    this.normalizeRules();
+};
+
+oop.inherits(SqlHighlightRules, TextHighlightRules);
+
+exports.SqlHighlightRules = SqlHighlightRules;
+});
+
+ace.define("ace/mode/sql",["require","exports","module","ace/lib/oop","ace/mode/text","ace/mode/sql_highlight_rules"], function(require, exports, module) {
+"use strict";
+
+var oop = require("../lib/oop");
+var TextMode = require("./text").Mode;
+var SqlHighlightRules = require("./sql_highlight_rules").SqlHighlightRules;
+
+var Mode = function() {
+    this.HighlightRules = SqlHighlightRules;
+    this.$behaviour = this.$defaultBehaviour;
+};
+oop.inherits(Mode, TextMode);
+
+(function() {
+
+    this.lineCommentStart = "--";
+
+    this.$id = "ace/mode/sql";
+}).call(Mode.prototype);
+
+exports.Mode = Mode;
+
+});

+ 1 - 1
public/app/plugins/datasource/mysql/partials/query.editor.html

@@ -1,7 +1,7 @@
 <query-editor-row query-ctrl="ctrl" can-collapse="false">
 <query-editor-row query-ctrl="ctrl" can-collapse="false">
 	<div class="gf-form-inline">
 	<div class="gf-form-inline">
 		<div class="gf-form gf-form--grow">
 		<div class="gf-form gf-form--grow">
-			<code-editor content="ctrl.target.rawSql" on-change="ctrl.panelCtrl.refresh()" data-mode="sql">
+			<code-editor content="ctrl.target.rawSql" datasource="ctrl.datasource" on-change="ctrl.panelCtrl.refresh()" data-mode="sql">
 			</code-editor>
 			</code-editor>
 		</div>
 		</div>
 	</div>
 	</div>

+ 202 - 0
vendor/cloud.google.com/go/LICENSE

@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 Google Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 437 - 0
vendor/cloud.google.com/go/compute/metadata/metadata.go

@@ -0,0 +1,437 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+const (
+	// metadataIP is the documented metadata server IP address.
+	metadataIP = "169.254.169.254"
+
+	// metadataHostEnv is the environment variable specifying the
+	// GCE metadata hostname.  If empty, the default value of
+	// metadataIP ("169.254.169.254") is used instead.
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+
+	userAgent = "gcloud-golang/0.1"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			ResponseHeaderTimeout: 2 * time.Second,
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+		host = metadataIP
+	}
+	url := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	req.Header.Set("User-Agent", userAgent)
+	res, err := client.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var (
+	onGCEOnce sync.Once
+	onGCE     bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	onGCEOnce.Do(initOnGCE)
+	return onGCE
+}
+
+func initOnGCE() {
+	onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+	// The user explicitly said they're on GCE, so trust them.
+	if os.Getenv(metadataHostEnv) != "" {
+		return true
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	resc := make(chan bool, 2)
+
+	// Try two strategies in parallel.
+	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	go func() {
+		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+		req.Header.Set("User-Agent", userAgent)
+		res, err := ctxhttp.Do(ctx, metaClient, req)
+		if err != nil {
+			resc <- false
+			return
+		}
+		defer res.Body.Close()
+		resc <- res.Header.Get("Metadata-Flavor") == "Google"
+	}()
+
+	go func() {
+		addrs, err := net.LookupHost("metadata.google.internal")
+		if err != nil || len(addrs) == 0 {
+			resc <- false
+			return
+		}
+		resc <- strsContains(addrs, metadataIP)
+	}()
+
+	tryHarder := systemInfoSuggestsGCE()
+	if tryHarder {
+		res := <-resc
+		if res {
+			// The first strategy succeeded, so let's use it.
+			return true
+		}
+		// Wait for either the DNS or metadata server probe to
+		// contradict the other one and say we are running on
+		// GCE. Give it a lot of time to do so, since the system
+		// info already suggests we're running on a GCE BIOS.
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case res = <-resc:
+			return res
+		case <-timer.C:
+			// Too slow. Who knows what this system is.
+			return false
+		}
+	}
+
+	// There's no hint from the system info that we're running on
+	// GCE, so use the first probe's result as truth, whether it's
+	// true or false. The goal here is to optimize for speed for
+	// users who are NOT running on GCE. We can't assume that
+	// either a DNS lookup or an HTTP request to a blackholed IP
+	// address is fast. Worst case this should return when the
+	// metaClient's Transport.ResponseHeaderTimeout or
+	// Transport.Dial.Timeout fires (in two seconds).
+	return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name.
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM.  The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
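
A minimal, illustrative sketch of how the vendored metadata package above is typically consumed by application code (not part of the vendored file); the attribute name "my-attr" is a hypothetical placeholder:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server and DNS, as in testOnGCE above.
	if !metadata.OnGCE() {
		log.Println("not running on GCE; skipping metadata lookups")
		return
	}
	// Each call issues a GET against http://169.254.169.254/computeMetadata/v1/...
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project=%s zone=%s\n", proj, zone)

	// Missing values surface as NotDefinedError rather than an empty string.
	if _, err := metadata.InstanceAttributeValue("my-attr"); err != nil {
		if _, ok := err.(metadata.NotDefinedError); ok {
			fmt.Println("attribute my-attr is not defined")
		}
	}
}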

+ 256 - 0
vendor/cloud.google.com/go/iam/iam.go

@@ -0,0 +1,256 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package iam supports the resource-specific operations of Google Cloud
+// IAM (Identity and Access Management) for the Google Cloud Libraries.
+// See https://cloud.google.com/iam for more about IAM.
+//
+// Users of the Google Cloud Libraries will typically not use this package
+// directly. Instead they will begin with some resource that supports IAM, like
+// a pubsub topic, and call its IAM method to get a Handle for that resource.
+package iam
+
+import (
+	"golang.org/x/net/context"
+	pb "google.golang.org/genproto/googleapis/iam/v1"
+	"google.golang.org/grpc"
+)
+
+// client abstracts the IAMPolicy API to allow multiple implementations.
+type client interface {
+	Get(ctx context.Context, resource string) (*pb.Policy, error)
+	Set(ctx context.Context, resource string, p *pb.Policy) error
+	Test(ctx context.Context, resource string, perms []string) ([]string, error)
+}
+
+// grpcClient implements client for the standard gRPC-based IAMPolicy service.
+type grpcClient struct {
+	c pb.IAMPolicyClient
+}
+
+func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
+	proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
+	if err != nil {
+		return nil, err
+	}
+	return proto, nil
+}
+func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
+	_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
+		Resource: resource,
+		Policy:   p,
+	})
+	return err
+}
+
+func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
+	res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
+		Resource:    resource,
+		Permissions: perms,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return res.Permissions, nil
+}
+
+// A Handle provides IAM operations for a resource.
+type Handle struct {
+	c        client
+	resource string
+}
+
+// InternalNewHandle is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandle returns a Handle for resource.
+// The conn parameter refers to a server that must support the IAMPolicy service.
+func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
+	return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
+}
+
+// InternalNewHandleClient is for use by the Google Cloud Libraries only.
+//
+// InternalNewHandleClient returns a Handle for resource using the given
+// client implementation.
+func InternalNewHandleClient(c client, resource string) *Handle {
+	return &Handle{
+		c:        c,
+		resource: resource,
+	}
+}
+
+// Policy retrieves the IAM policy for the resource.
+func (h *Handle) Policy(ctx context.Context) (*Policy, error) {
+	proto, err := h.c.Get(ctx, h.resource)
+	if err != nil {
+		return nil, err
+	}
+	return &Policy{InternalProto: proto}, nil
+}
+
+// SetPolicy replaces the resource's current policy with the supplied Policy.
+//
+// If policy was created from a prior call to Get, then the modification will
+// only succeed if the policy has not changed since the Get.
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error {
+	return h.c.Set(ctx, h.resource, policy.InternalProto)
+}
+
+// TestPermissions returns the subset of permissions that the caller has on the resource.
+func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) {
+	return h.c.Test(ctx, h.resource, permissions)
+}
+
+// A RoleName is a name representing a collection of permissions.
+type RoleName string
+
+// Common role names.
+const (
+	Owner  RoleName = "roles/owner"
+	Editor RoleName = "roles/editor"
+	Viewer RoleName = "roles/viewer"
+)
+
+const (
+	// AllUsers is a special member that denotes all users, even unauthenticated ones.
+	AllUsers = "allUsers"
+
+	// AllAuthenticatedUsers is a special member that denotes all authenticated users.
+	AllAuthenticatedUsers = "allAuthenticatedUsers"
+)
+
+// A Policy is a list of Bindings representing roles
+// granted to members.
+//
+// The zero Policy is a valid policy with no bindings.
+type Policy struct {
+	// TODO(jba): when type aliases are available, put Policy into an internal package
+	// and provide an exported alias here.
+
+	// This field is exported for use by the Google Cloud Libraries only.
+	// It may become unexported in a future release.
+	InternalProto *pb.Policy
+}
+
+// Members returns the list of members with the supplied role.
+// The return value should not be modified. Use Add and Remove
+// to modify the members of a role.
+func (p *Policy) Members(r RoleName) []string {
+	b := p.binding(r)
+	if b == nil {
+		return nil
+	}
+	return b.Members
+}
+
+// HasRole reports whether member has role r.
+func (p *Policy) HasRole(member string, r RoleName) bool {
+	return memberIndex(member, p.binding(r)) >= 0
+}
+
+// Add adds member member to role r if it is not already present.
+// A new binding is created if there is no binding for the role.
+func (p *Policy) Add(member string, r RoleName) {
+	b := p.binding(r)
+	if b == nil {
+		if p.InternalProto == nil {
+			p.InternalProto = &pb.Policy{}
+		}
+		p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{
+			Role:    string(r),
+			Members: []string{member},
+		})
+		return
+	}
+	if memberIndex(member, b) < 0 {
+		b.Members = append(b.Members, member)
+		return
+	}
+}
+
+// Remove removes member from role r if it is present.
+func (p *Policy) Remove(member string, r RoleName) {
+	bi := p.bindingIndex(r)
+	if bi < 0 {
+		return
+	}
+	bindings := p.InternalProto.Bindings
+	b := bindings[bi]
+	mi := memberIndex(member, b)
+	if mi < 0 {
+		return
+	}
+	// Order doesn't matter for bindings or members, so to remove, move the last item
+	// into the removed spot and shrink the slice.
+	if len(b.Members) == 1 {
+		// Remove binding.
+		last := len(bindings) - 1
+		bindings[bi] = bindings[last]
+		bindings[last] = nil
+		p.InternalProto.Bindings = bindings[:last]
+		return
+	}
+	// Remove member.
+	// TODO(jba): worry about multiple copies of m?
+	last := len(b.Members) - 1
+	b.Members[mi] = b.Members[last]
+	b.Members[last] = ""
+	b.Members = b.Members[:last]
+}
+
+// Roles returns the names of all the roles that appear in the Policy.
+func (p *Policy) Roles() []RoleName {
+	if p.InternalProto == nil {
+		return nil
+	}
+	var rns []RoleName
+	for _, b := range p.InternalProto.Bindings {
+		rns = append(rns, RoleName(b.Role))
+	}
+	return rns
+}
+
+// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding {
+	i := p.bindingIndex(r)
+	if i < 0 {
+		return nil
+	}
+	return p.InternalProto.Bindings[i]
+}
+
+func (p *Policy) bindingIndex(r RoleName) int {
+	if p.InternalProto == nil {
+		return -1
+	}
+	for i, b := range p.InternalProto.Bindings {
+		if b.Role == string(r) {
+			return i
+		}
+	}
+	return -1
+}
+
+// memberIndex returns the index of m in b's Members, or -1 if not found.
+func memberIndex(m string, b *pb.Binding) int {
+	if b == nil {
+		return -1
+	}
+	for i, mm := range b.Members {
+		if mm == m {
+			return i
+		}
+	}
+	return -1
+}
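
A short sketch of the in-memory Policy manipulation this package exposes (no network calls are made); the member strings below are hypothetical examples:

package main

import (
	"fmt"

	"cloud.google.com/go/iam"
)

func main() {
	// The zero Policy is valid and has no bindings.
	var p iam.Policy

	// Add creates the binding for a role if none exists yet, then appends members.
	p.Add("user:alice@example.com", iam.Viewer)
	p.Add(iam.AllAuthenticatedUsers, iam.Viewer)

	fmt.Println(p.HasRole("user:alice@example.com", iam.Viewer)) // true
	fmt.Println(p.Members(iam.Viewer))

	// Remove swaps the last member into the removed slot, so ordering is not preserved.
	p.Remove("user:alice@example.com", iam.Viewer)
	fmt.Println(p.Roles())
}

In a real program the Policy would come from Handle.Policy and be written back with Handle.SetPolicy, which only succeeds if the policy has not changed since it was read.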

+ 94 - 0
vendor/cloud.google.com/go/internal/optional/optional.go

@@ -0,0 +1,94 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package optional provides versions of primitive types that can
+// be nil. These are useful in methods that update some of an API object's
+// fields.
+package optional
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	// Bool is either a bool or nil.
+	Bool interface{}
+
+	// String is either a string or nil.
+	String interface{}
+
+	// Int is either an int or nil.
+	Int interface{}
+
+	// Uint is either a uint or nil.
+	Uint interface{}
+
+	// Float64 is either a float64 or nil.
+	Float64 interface{}
+)
+
+// ToBool returns its argument as a bool.
+// It panics if its argument is nil or not a bool.
+func ToBool(v Bool) bool {
+	x, ok := v.(bool)
+	if !ok {
+		doPanic("Bool", v)
+	}
+	return x
+}
+
+// ToString returns its argument as a string.
+// It panics if its argument is nil or not a string.
+func ToString(v String) string {
+	x, ok := v.(string)
+	if !ok {
+		doPanic("String", v)
+	}
+	return x
+}
+
+// ToInt returns its argument as an int.
+// It panics if its argument is nil or not an int.
+func ToInt(v Int) int {
+	x, ok := v.(int)
+	if !ok {
+		doPanic("Int", v)
+	}
+	return x
+}
+
+// ToUint returns its argument as a uint.
+// It panics if its argument is nil or not a uint.
+func ToUint(v Uint) uint {
+	x, ok := v.(uint)
+	if !ok {
+		doPanic("Uint", v)
+	}
+	return x
+}
+
+// ToFloat64 returns its argument as a float64.
+// It panics if its argument is nil or not a float64.
+func ToFloat64(v Float64) float64 {
+	x, ok := v.(float64)
+	if !ok {
+		doPanic("Float64", v)
+	}
+	return x
+}
+
+func doPanic(capType string, v interface{}) {
+	panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
+}
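
This internal package enables three-valued update fields: a nil field means "leave unchanged", while any non-nil value (even false or zero) means "set it". A small sketch of the pattern, assuming it lives inside cloud.google.com/go (the internal/ path is not importable from other modules); fieldUpdate and apply are hypothetical names:

package main

import (
	"fmt"

	"cloud.google.com/go/internal/optional"
)

// fieldUpdate mirrors how update structs such as storage.BucketAttrsToUpdate
// (later in this diff) use optional types.
type fieldUpdate struct {
	Enabled optional.Bool
}

func apply(u fieldUpdate) {
	if u.Enabled == nil {
		fmt.Println("Enabled: unchanged")
		return
	}
	fmt.Println("Enabled set to", optional.ToBool(u.Enabled))
}

func main() {
	apply(fieldUpdate{})               // Enabled: unchanged
	apply(fieldUpdate{Enabled: true})  // Enabled set to true
	apply(fieldUpdate{Enabled: false}) // Enabled set to false (distinct from unchanged)
}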

+ 56 - 0
vendor/cloud.google.com/go/internal/retry.go

@@ -0,0 +1,56 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"fmt"
+	"time"
+
+	gax "github.com/googleapis/gax-go"
+
+	"golang.org/x/net/context"
+)
+
+// Retry calls the supplied function f repeatedly according to the provided
+// backoff parameters. It returns when one of the following occurs:
+// When f's first return value is true, Retry immediately returns with f's second
+// return value.
+// When the provided context is done, Retry returns with an error that
+// includes both ctx.Err() and the last error returned by f.
+func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
+	return retry(ctx, bo, f, gax.Sleep)
+}
+
+func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
+	sleep func(context.Context, time.Duration) error) error {
+	var lastErr error
+	for {
+		stop, err := f()
+		if stop {
+			return err
+		}
+		// Remember the last "real" error from f.
+		if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
+			lastErr = err
+		}
+		p := bo.Pause()
+		if cerr := sleep(ctx, p); cerr != nil {
+			if lastErr != nil {
+				return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
+			}
+			return cerr
+		}
+	}
+}
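
A hedged sketch of the call pattern Retry expects, mirroring the way the repository's own retry helpers invoke it; the transient-failure simulation is purely illustrative, and the internal/ path is only importable from within cloud.google.com/go:

package main

import (
	"errors"
	"fmt"
	"time"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	attempts := 0
	err := internal.Retry(ctx, gax.Backoff{Initial: 10 * time.Millisecond, Max: 100 * time.Millisecond},
		func() (stop bool, err error) {
			attempts++
			if attempts < 3 {
				// stop=false: sleep per the backoff and try again.
				return false, errors.New("transient failure")
			}
			// stop=true: return this error immediately (nil means success).
			return true, nil
		})
	fmt.Println(attempts, err) // 3 <nil>
}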

+ 6 - 0
vendor/cloud.google.com/go/internal/version/update_version.sh

@@ -0,0 +1,6 @@
+#!/bin/bash
+
+today=$(date +%Y%m%d)
+
+sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE
+

+ 71 - 0
vendor/cloud.google.com/go/internal/version/version.go

@@ -0,0 +1,71 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate ./update_version.sh
+
+// Package version contains version information for Google Cloud Client
+// Libraries for Go, as reported in request headers.
+package version
+
+import (
+	"runtime"
+	"strings"
+	"unicode"
+)
+
+// Repo is the current version of the client libraries in this
+// repo. It should be a date in YYYYMMDD format.
+const Repo = "20170621"
+
+// Go returns the Go runtime version. The returned string
+// has no whitespace.
+func Go() string {
+	return goVersion
+}
+
+var goVersion = goVer(runtime.Version())
+
+const develPrefix = "devel +"
+
+func goVer(s string) string {
+	if strings.HasPrefix(s, develPrefix) {
+		s = s[len(develPrefix):]
+		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
+			s = s[:p]
+		}
+		return s
+	}
+
+	if strings.HasPrefix(s, "go1") {
+		s = s[2:]
+		var prerelease string
+		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
+			s, prerelease = s[:p], s[p:]
+		}
+		if strings.HasSuffix(s, ".") {
+			s += "0"
+		} else if strings.Count(s, ".") < 2 {
+			s += ".0"
+		}
+		if prerelease != "" {
+			s += "-" + prerelease
+		}
+		return s
+	}
+	return ""
+}
+
+func notSemverRune(r rune) bool {
+	return strings.IndexRune("0123456789.", r) < 0
+}
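
For reference, a small sketch of the exported surface; the mappings in the comment describe what goVer above produces for a few representative runtime.Version() strings (again, the internal/ path is only importable from within cloud.google.com/go):

package main

import (
	"fmt"

	"cloud.google.com/go/internal/version"
)

func main() {
	// version.Go() normalizes runtime.Version() via goVer above, for example:
	//   "go1.8"             -> "1.8.0"
	//   "go1.8.3"           -> "1.8.3"
	//   "go1.9rc2"          -> "1.9.0-rc2"  (prerelease kept behind a "-")
	//   "devel +a1b2c3 ..." -> "a1b2c3"
	fmt.Println(version.Go(), version.Repo)
}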

+ 252 - 0
vendor/cloud.google.com/go/storage/acl.go

@@ -0,0 +1,252 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+	RoleOwner  ACLRole = "OWNER"
+	RoleReader ACLRole = "READER"
+	RoleWriter ACLRole = "WRITER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+	AllUsers              ACLEntity = "allUsers"
+	AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+	Entity ACLEntity
+	Role   ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+	c           *Client
+	bucket      string
+	object      string
+	isDefault   bool
+	userProject string // for requester-pays buckets
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+	if a.object != "" {
+		return a.objectDelete(ctx, entity)
+	}
+	if a.isDefault {
+		return a.bucketDefaultDelete(ctx, entity)
+	}
+	return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+	if a.object != "" {
+		return a.objectSet(ctx, entity, role, false)
+	}
+	if a.isDefault {
+		return a.objectSet(ctx, entity, role, true)
+	}
+	return a.bucketSet(ctx, entity, role)
+}
+
+// List retrieves ACL entries.
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
+	if a.object != "" {
+		return a.objectList(ctx)
+	}
+	if a.isDefault {
+		return a.bucketDefaultList(ctx)
+	}
+	return a.bucketList(ctx)
+}
+
+func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
+	var acls *raw.ObjectAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
+		a.configureCall(req, ctx)
+		acls, err = req.Do()
+		return err
+	})
+	if err != nil {
+		return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
+	}
+	return toACLRules(acls.Items), nil
+}
+
+func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
+	err := runWithRetry(ctx, func() error {
+		req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
+		a.configureCall(req, ctx)
+		return req.Do()
+	})
+	if err != nil {
+		return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+	}
+	return nil
+}
+
+func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
+	var acls *raw.BucketAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		req := a.c.raw.BucketAccessControls.List(a.bucket)
+		a.configureCall(req, ctx)
+		acls, err = req.Do()
+		return err
+	})
+	if err != nil {
+		return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
+	}
+	r := make([]ACLRule, len(acls.Items))
+	for i, v := range acls.Items {
+		r[i].Entity = ACLEntity(v.Entity)
+		r[i].Role = ACLRole(v.Role)
+	}
+	return r, nil
+}
+
+func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+	acl := &raw.BucketAccessControl{
+		Bucket: a.bucket,
+		Entity: string(entity),
+		Role:   string(role),
+	}
+	err := runWithRetry(ctx, func() error {
+		req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
+		a.configureCall(req, ctx)
+		_, err := req.Do()
+		return err
+	})
+	if err != nil {
+		return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+	}
+	return nil
+}
+
+func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
+	err := runWithRetry(ctx, func() error {
+		req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
+		a.configureCall(req, ctx)
+		return req.Do()
+	})
+	if err != nil {
+		return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+	}
+	return nil
+}
+
+func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
+	var acls *raw.ObjectAccessControls
+	var err error
+	err = runWithRetry(ctx, func() error {
+		req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
+		a.configureCall(req, ctx)
+		acls, err = req.Do()
+		return err
+	})
+	if err != nil {
+		return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
+	}
+	return toACLRules(acls.Items), nil
+}
+
+func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
+	type setRequest interface {
+		Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
+		Header() http.Header
+	}
+
+	acl := &raw.ObjectAccessControl{
+		Bucket: a.bucket,
+		Entity: string(entity),
+		Role:   string(role),
+	}
+	var req setRequest
+	if isBucketDefault {
+		req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
+	} else {
+		req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
+	}
+	a.configureCall(req, ctx)
+	err := runWithRetry(ctx, func() error {
+		_, err := req.Do()
+		return err
+	})
+	if err != nil {
+		if isBucketDefault {
+			return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+		} else {
+			return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err)
+		}
+	}
+	return nil
+}
+
+func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
+	err := runWithRetry(ctx, func() error {
+		req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
+		a.configureCall(req, ctx)
+		return req.Do()
+	})
+	if err != nil {
+		return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
+	}
+	return nil
+}
+
+func (a *ACLHandle) configureCall(call interface {
+	Header() http.Header
+}, ctx context.Context) {
+	vc := reflect.ValueOf(call)
+	vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
+	if a.userProject != "" {
+		vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
+	}
+	setClientHeader(call.Header())
+}
+
+func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
+	r := make([]ACLRule, 0, len(items))
+	for _, item := range items {
+		r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
+	}
+	return r
+}
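
An illustrative sketch of how these ACL handles are reached from the public storage API (bucket and object names are placeholders); NewClient and ObjectHandle.ACL come from other files of this package that are not shown in this hunk:

package main

import (
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	acl := client.Bucket("my-bucket").Object("report.csv").ACL()

	// Grant read access to everyone; objectSet above issues the Update call.
	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
		log.Fatal(err)
	}

	rules, err := acl.List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		log.Printf("%s -> %s", r.Entity, r.Role)
	}
}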

+ 590 - 0
vendor/cloud.google.com/go/storage/bucket.go

@@ -0,0 +1,590 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"time"
+
+	"cloud.google.com/go/internal/optional"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	"google.golang.org/api/iterator"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// BucketHandle provides operations on a Google Cloud Storage bucket.
+// Use Client.Bucket to get a handle.
+type BucketHandle struct {
+	c                *Client
+	name             string
+	acl              ACLHandle
+	defaultObjectACL ACLHandle
+	conds            *BucketConditions
+	userProject      string // project for requester-pays buckets
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// The supplied name must contain only lowercase letters, numbers, dashes,
+// underscores, and dots. The full specification for valid bucket names can be
+// found at:
+//   https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+	return &BucketHandle{
+		c:    c,
+		name: name,
+		acl: ACLHandle{
+			c:      c,
+			bucket: name,
+		},
+		defaultObjectACL: ACLHandle{
+			c:         c,
+			bucket:    name,
+			isDefault: true,
+		},
+	}
+}
+
+// Create creates the Bucket in the project.
+// If attrs is nil the API defaults will be used.
+func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
+	var bkt *raw.Bucket
+	if attrs != nil {
+		bkt = attrs.toRawBucket()
+	} else {
+		bkt = &raw.Bucket{}
+	}
+	bkt.Name = b.name
+	req := b.c.raw.Buckets.Insert(projectID, bkt)
+	setClientHeader(req.Header())
+	return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
+}
+
+// Delete deletes the Bucket.
+func (b *BucketHandle) Delete(ctx context.Context) error {
+	req, err := b.newDeleteCall()
+	if err != nil {
+		return err
+	}
+	return runWithRetry(ctx, func() error { return req.Context(ctx).Do() })
+}
+
+func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
+	req := b.c.raw.Buckets.Delete(b.name)
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// ACL returns an ACLHandle, which provides access to the bucket's access control list.
+// This controls who can list, create or overwrite the objects in a bucket.
+// This call does not perform any network operations.
+func (b *BucketHandle) ACL() *ACLHandle {
+	return &b.acl
+}
+
+// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
+// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
+// This call does not perform any network operations.
+func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
+	return &b.defaultObjectACL
+}
+
+// Object returns an ObjectHandle, which provides operations on the named object.
+// This call does not perform any network operations.
+//
+// name must consist entirely of valid UTF-8-encoded runes. The full specification
+// for valid object names can be found at:
+//   https://cloud.google.com/storage/docs/bucket-naming
+func (b *BucketHandle) Object(name string) *ObjectHandle {
+	return &ObjectHandle{
+		c:      b.c,
+		bucket: b.name,
+		object: name,
+		acl: ACLHandle{
+			c:           b.c,
+			bucket:      b.name,
+			object:      name,
+			userProject: b.userProject,
+		},
+		gen:         -1,
+		userProject: b.userProject,
+	}
+}
+
+// Attrs returns the metadata for the bucket.
+func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
+	req, err := b.newGetCall()
+	if err != nil {
+		return nil, err
+	}
+	var resp *raw.Bucket
+	err = runWithRetry(ctx, func() error {
+		resp, err = req.Context(ctx).Do()
+		return err
+	})
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return nil, ErrBucketNotExist
+	}
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(resp), nil
+}
+
+func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
+	req := b.c.raw.Buckets.Get(b.name).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
+	req, err := b.newPatchCall(&uattrs)
+	if err != nil {
+		return nil, err
+	}
+	// TODO(jba): retry iff metagen is set?
+	rb, err := req.Context(ctx).Do()
+	if err != nil {
+		return nil, err
+	}
+	return newBucket(rb), nil
+}
+
+func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
+	rb := uattrs.toRawBucket()
+	req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
+	setClientHeader(req.Header())
+	if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
+		return nil, err
+	}
+	if b.userProject != "" {
+		req.UserProject(b.userProject)
+	}
+	return req, nil
+}
+
+// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
+type BucketAttrs struct {
+	// Name is the name of the bucket.
+	Name string
+
+	// ACL is the list of access control rules on the bucket.
+	ACL []ACLRule
+
+	// DefaultObjectACL is the list of access controls to
+	// apply to new objects when no object ACL is provided.
+	DefaultObjectACL []ACLRule
+
+	// Location is the location of the bucket. It defaults to "US".
+	Location string
+
+	// MetaGeneration is the metadata generation of the bucket.
+	MetaGeneration int64
+
+	// StorageClass is the default storage class of the bucket. This defines
+	// how objects in the bucket are stored and determines the SLA
+	// and the cost of storage. Typical values are "MULTI_REGIONAL",
+	// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
+	// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
+	// the bucket's location settings.
+	StorageClass string
+
+	// Created is the creation time of the bucket.
+	Created time.Time
+
+	// VersioningEnabled reports whether this bucket has versioning enabled.
+	// This field is read-only.
+	VersioningEnabled bool
+
+	// Labels are the bucket's labels.
+	Labels map[string]string
+
+	// RequesterPays reports whether the bucket is a Requester Pays bucket.
+	RequesterPays bool
+}
+
+func newBucket(b *raw.Bucket) *BucketAttrs {
+	if b == nil {
+		return nil
+	}
+	bucket := &BucketAttrs{
+		Name:              b.Name,
+		Location:          b.Location,
+		MetaGeneration:    b.Metageneration,
+		StorageClass:      b.StorageClass,
+		Created:           convertTime(b.TimeCreated),
+		VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
+		Labels:            b.Labels,
+		RequesterPays:     b.Billing != nil && b.Billing.RequesterPays,
+	}
+	acl := make([]ACLRule, len(b.Acl))
+	for i, rule := range b.Acl {
+		acl[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.ACL = acl
+	objACL := make([]ACLRule, len(b.DefaultObjectAcl))
+	for i, rule := range b.DefaultObjectAcl {
+		objACL[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	bucket.DefaultObjectACL = objACL
+	return bucket
+}
+
+// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
+func (b *BucketAttrs) toRawBucket() *raw.Bucket {
+	var acl []*raw.BucketAccessControl
+	if len(b.ACL) > 0 {
+		acl = make([]*raw.BucketAccessControl, len(b.ACL))
+		for i, rule := range b.ACL {
+			acl[i] = &raw.BucketAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	dACL := toRawObjectACL(b.DefaultObjectACL)
+	// Copy label map.
+	var labels map[string]string
+	if len(b.Labels) > 0 {
+		labels = make(map[string]string, len(b.Labels))
+		for k, v := range b.Labels {
+			labels[k] = v
+		}
+	}
+	// Ignore VersioningEnabled if it is false. This is OK because
+	// we only call this method when creating a bucket, and by default
+	// new buckets have versioning off.
+	var v *raw.BucketVersioning
+	if b.VersioningEnabled {
+		v = &raw.BucketVersioning{Enabled: true}
+	}
+	var bb *raw.BucketBilling
+	if b.RequesterPays {
+		bb = &raw.BucketBilling{RequesterPays: true}
+	}
+	return &raw.Bucket{
+		Name:             b.Name,
+		DefaultObjectAcl: dACL,
+		Location:         b.Location,
+		StorageClass:     b.StorageClass,
+		Acl:              acl,
+		Versioning:       v,
+		Labels:           labels,
+		Billing:          bb,
+	}
+}
+
+type BucketAttrsToUpdate struct {
+	// VersioningEnabled, if set, updates whether the bucket uses versioning.
+	VersioningEnabled optional.Bool
+
+	// RequesterPays, if set, updates whether the bucket is a Requester Pays bucket.
+	RequesterPays optional.Bool
+
+	setLabels    map[string]string
+	deleteLabels map[string]bool
+}
+
+// SetLabel causes a label to be added or modified when ua is used
+// in a call to Bucket.Update.
+func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
+	if ua.setLabels == nil {
+		ua.setLabels = map[string]string{}
+	}
+	ua.setLabels[name] = value
+}
+
+// DeleteLabel causes a label to be deleted when ua is used in a
+// call to Bucket.Update.
+func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
+	if ua.deleteLabels == nil {
+		ua.deleteLabels = map[string]bool{}
+	}
+	ua.deleteLabels[name] = true
+}
+
+func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
+	rb := &raw.Bucket{}
+	if ua.VersioningEnabled != nil {
+		rb.Versioning = &raw.BucketVersioning{
+			Enabled:         optional.ToBool(ua.VersioningEnabled),
+			ForceSendFields: []string{"Enabled"},
+		}
+	}
+	if ua.RequesterPays != nil {
+		rb.Billing = &raw.BucketBilling{
+			RequesterPays:   optional.ToBool(ua.RequesterPays),
+			ForceSendFields: []string{"RequesterPays"},
+		}
+	}
+	if ua.setLabels != nil || ua.deleteLabels != nil {
+		rb.Labels = map[string]string{}
+		for k, v := range ua.setLabels {
+			rb.Labels[k] = v
+		}
+		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
+			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
+		}
+		for l := range ua.deleteLabels {
+			rb.NullFields = append(rb.NullFields, "Labels."+l)
+		}
+	}
+	return rb
+}
+
+// If returns a new BucketHandle that applies a set of preconditions.
+// Preconditions already set on the BucketHandle are ignored.
+// Operations on the new handle will only occur if the preconditions are
+// satisfied. The only valid preconditions for buckets are MetagenerationMatch
+// and MetagenerationNotMatch.
+func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
+	b2 := *b
+	b2.conds = &conds
+	return &b2
+}
+
+// BucketConditions constrain bucket methods to act on specific metagenerations.
+//
+// The zero value is an empty set of constraints.
+type BucketConditions struct {
+	// MetagenerationMatch specifies that the bucket must have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationMatch is zero, it has no effect.
+	MetagenerationMatch int64
+
+	// MetagenerationNotMatch specifies that the bucket must not have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationNotMatch is zero, it has no effect.
+	MetagenerationNotMatch int64
+}
+
+func (c *BucketConditions) validate(method string) error {
+	if *c == (BucketConditions{}) {
+		return fmt.Errorf("storage: %s: empty conditions", method)
+	}
+	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
+		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
+	}
+	return nil
+}
+
+// UserProject returns a new BucketHandle that passes the project ID as the user
+// project for all subsequent calls. A user project is required for all operations
+// on requester-pays buckets.
+func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
+	b2 := *b
+	b2.userProject = projectID
+	b2.acl.userProject = projectID
+	b2.defaultObjectACL.userProject = projectID
+	return &b2
+}
+
+// applyBucketConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate(method); err != nil {
+		return err
+	}
+	cval := reflect.ValueOf(call)
+	switch {
+	case conds.MetagenerationMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
+		}
+	case conds.MetagenerationNotMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
+		}
+	}
+	return nil
+}
+
+// Objects returns an iterator over the objects in the bucket that match the Query q.
+// If q is nil, no filtering is done.
+func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
+	it := &ObjectIterator{
+		ctx:    ctx,
+		bucket: b,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.items) },
+		func() interface{} { b := it.items; it.items = nil; return b })
+	if q != nil {
+		it.query = *q
+	}
+	return it
+}
+
+// An ObjectIterator is an iterator over ObjectAttrs.
+type ObjectIterator struct {
+	ctx      context.Context
+	bucket   *BucketHandle
+	query    Query
+	pageInfo *iterator.PageInfo
+	nextFunc func() error
+	items    []*ObjectAttrs
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns iterator.Done, all subsequent
+// calls will return iterator.Done.
+//
+// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
+// have a non-empty Prefix field, and a zero value for all other fields. These
+// represent prefixes.
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	item := it.items[0]
+	it.items = it.items[1:]
+	return item, nil
+}
+
+func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
+	req := it.bucket.c.raw.Objects.List(it.bucket.name)
+	setClientHeader(req.Header())
+	req.Projection("full")
+	req.Delimiter(it.query.Delimiter)
+	req.Prefix(it.query.Prefix)
+	req.Versions(it.query.Versions)
+	req.PageToken(pageToken)
+	if it.bucket.userProject != "" {
+		req.UserProject(it.bucket.userProject)
+	}
+	if pageSize > 0 {
+		req.MaxResults(int64(pageSize))
+	}
+	var resp *raw.Objects
+	var err error
+	err = runWithRetry(it.ctx, func() error {
+		resp, err = req.Context(it.ctx).Do()
+		return err
+	})
+	if err != nil {
+		if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+			err = ErrBucketNotExist
+		}
+		return "", err
+	}
+	for _, item := range resp.Items {
+		it.items = append(it.items, newObject(item))
+	}
+	for _, prefix := range resp.Prefixes {
+		it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
+	}
+	return resp.NextPageToken, nil
+}
+
+// TODO(jbd): Add storage.buckets.update.
+
+// Buckets returns an iterator over the buckets in the project. You may
+// optionally set the iterator's Prefix field to restrict the list to buckets
+// whose names begin with the prefix. By default, all buckets in the project
+// are returned.
+func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
+	it := &BucketIterator{
+		ctx:       ctx,
+		client:    c,
+		projectID: projectID,
+	}
+	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
+		it.fetch,
+		func() int { return len(it.buckets) },
+		func() interface{} { b := it.buckets; it.buckets = nil; return b })
+	return it
+}
+
+// A BucketIterator is an iterator over BucketAttrs.
+type BucketIterator struct {
+	// Prefix restricts the iterator to buckets whose names begin with it.
+	Prefix string
+
+	ctx       context.Context
+	client    *Client
+	projectID string
+	buckets   []*BucketAttrs
+	pageInfo  *iterator.PageInfo
+	nextFunc  func() error
+}
+
+// Next returns the next result. Its second return value is iterator.Done if
+// there are no more results. Once Next returns iterator.Done, all subsequent
+// calls will return iterator.Done.
+func (it *BucketIterator) Next() (*BucketAttrs, error) {
+	if err := it.nextFunc(); err != nil {
+		return nil, err
+	}
+	b := it.buckets[0]
+	it.buckets = it.buckets[1:]
+	return b, nil
+}
+
+// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
+
+func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) {
+	req := it.client.raw.Buckets.List(it.projectID)
+	setClientHeader(req.Header())
+	req.Projection("full")
+	req.Prefix(it.Prefix)
+	req.PageToken(pageToken)
+	if pageSize > 0 {
+		req.MaxResults(int64(pageSize))
+	}
+	var resp *raw.Buckets
+	var err error
+	err = runWithRetry(it.ctx, func() error {
+		resp, err = req.Context(it.ctx).Do()
+		return err
+	})
+	if err != nil {
+		return "", err
+	}
+	for _, item := range resp.Items {
+		it.buckets = append(it.buckets, newBucket(item))
+	}
+	return resp.NextPageToken, nil
+}
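
A brief sketch of the iterator pattern defined above, listing objects under a prefix (the bucket name and prefix are placeholders; NewClient lives in a file not shown in this hunk):

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	it := client.Bucket("my-bucket").Objects(ctx, &storage.Query{Prefix: "logs/"})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Size)
	}
}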

+ 201 - 0
vendor/cloud.google.com/go/storage/copy.go

@@ -0,0 +1,201 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"errors"
+	"fmt"
+
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// CopierFrom creates a Copier that can copy src to dst.
+// You can immediately call Run on the returned Copier, or
+// you can configure it first.
+//
+// For Requester Pays buckets, the user project of dst is billed, unless it is empty,
+// in which case the user project of src is billed.
+func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier {
+	return &Copier{dst: dst, src: src}
+}
+
+// A Copier copies a source object to a destination.
+type Copier struct {
+	// ObjectAttrs are optional attributes to set on the destination object.
+	// Any attributes must be initialized before any calls on the Copier. Nil
+	// or zero-valued attributes are ignored.
+	ObjectAttrs
+
+	// RewriteToken can be set before calling Run to resume a copy
+	// operation. After Run returns a non-nil error, RewriteToken will
+	// have been updated to contain the value needed to resume the copy.
+	RewriteToken string
+
+	// ProgressFunc can be used to monitor the progress of a multi-RPC copy
+	// operation. If ProgressFunc is not nil and copying requires multiple
+	// calls to the underlying service (see
+	// https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then
+	// ProgressFunc will be invoked after each call with the number of bytes of
+	// content copied so far and the total size in bytes of the source object.
+	//
+	// ProgressFunc is intended to make upload progress available to the
+	// application. For example, the implementation of ProgressFunc may update
+	// a progress bar in the application's UI, or log the result of
+	// float64(copiedBytes)/float64(totalBytes).
+	//
+	// ProgressFunc should return quickly without blocking.
+	ProgressFunc func(copiedBytes, totalBytes uint64)
+
+	dst, src *ObjectHandle
+}
+
+// Run performs the copy.
+func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
+	if err := c.src.validate(); err != nil {
+		return nil, err
+	}
+	if err := c.dst.validate(); err != nil {
+		return nil, err
+	}
+	// Convert destination attributes to raw form, omitting the bucket.
+	// If the bucket is included but name or content-type aren't, the service
+	// returns a 400 with "Required" as the only message. Omitting the bucket
+	// does not cause any problems.
+	rawObject := c.ObjectAttrs.toRawObject("")
+	for {
+		res, err := c.callRewrite(ctx, rawObject)
+		if err != nil {
+			return nil, err
+		}
+		if c.ProgressFunc != nil {
+			c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
+		}
+		if res.Done { // Finished successfully.
+			return newObject(res.Resource), nil
+		}
+	}
+}
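
A minimal sketch of driving the Copier, assuming ctx, client, and a log import; the attribute and callback fields are the ones documented on the struct above, and the bucket and object names are placeholders:

    src := client.Bucket("bucket-1").Object("source")
    dst := client.Bucket("bucket-2").Object("destination")
    copier := dst.CopierFrom(src)
    copier.ContentType = "text/plain" // optional destination attributes via the embedded ObjectAttrs
    copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
        log.Printf("copied %d of %d bytes", copiedBytes, totalBytes)
    }
    attrs, err := copier.Run(ctx)
    if err != nil {
        // TODO: Handle error; copier.RewriteToken can be used to resume.
    }
    fmt.Println(attrs.Name)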
+
+func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
+	call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
+
+	call.Context(ctx).Projection("full")
+	if c.RewriteToken != "" {
+		call.RewriteToken(c.RewriteToken)
+	}
+	if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
+		return nil, err
+	}
+	if c.dst.userProject != "" {
+		call.UserProject(c.dst.userProject)
+	} else if c.src.userProject != "" {
+		call.UserProject(c.src.userProject)
+	}
+	if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
+		return nil, err
+	}
+	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
+		return nil, err
+	}
+	if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
+		return nil, err
+	}
+	var res *raw.RewriteResponse
+	var err error
+	setClientHeader(call.Header())
+	err = runWithRetry(ctx, func() error { res, err = call.Do(); return err })
+	if err != nil {
+		return nil, err
+	}
+	c.RewriteToken = res.RewriteToken
+	return res, nil
+}
+
+// ComposerFrom creates a Composer that can compose srcs into dst.
+// You can immediately call Run on the returned Composer, or you can
+// configure it first.
+//
+// The encryption key for the destination object will be used to decrypt all
+// source objects and encrypt the destination object. It is an error
+// to specify an encryption key for any of the source objects.
+func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer {
+	return &Composer{dst: dst, srcs: srcs}
+}
+
+// A Composer composes source objects into a destination object.
+//
+// For Requester Pays buckets, the user project of dst is billed.
+type Composer struct {
+	// ObjectAttrs are optional attributes to set on the destination object.
+	// Any attributes must be initialized before any calls on the Composer. Nil
+	// or zero-valued attributes are ignored.
+	ObjectAttrs
+
+	dst  *ObjectHandle
+	srcs []*ObjectHandle
+}
+
+// Run performs the compose operation.
+func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
+	if err := c.dst.validate(); err != nil {
+		return nil, err
+	}
+	if len(c.srcs) == 0 {
+		return nil, errors.New("storage: at least one source object must be specified")
+	}
+
+	req := &raw.ComposeRequest{}
+	// Compose requires a non-empty Destination, so we always set it,
+	// even if the caller-provided ObjectAttrs is the zero value.
+	req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
+	for _, src := range c.srcs {
+		if err := src.validate(); err != nil {
+			return nil, err
+		}
+		if src.bucket != c.dst.bucket {
+			return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket)
+		}
+		if src.encryptionKey != nil {
+			return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object)
+		}
+		srcObj := &raw.ComposeRequestSourceObjects{
+			Name: src.object,
+		}
+		if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
+			return nil, err
+		}
+		req.SourceObjects = append(req.SourceObjects, srcObj)
+	}
+
+	call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
+	if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
+		return nil, err
+	}
+	if c.dst.userProject != "" {
+		call.UserProject(c.dst.userProject)
+	}
+	if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
+		return nil, err
+	}
+	var obj *raw.Object
+	var err error
+	setClientHeader(call.Header())
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
+	if err != nil {
+		return nil, err
+	}
+	return newObject(obj), nil
+}
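
A corresponding sketch for composition, assuming ctx and client; note the constraints enforced above that all sources share the destination's bucket and carry no encryption key:

    bkt := client.Bucket("my-bucket")
    composer := bkt.Object("combined").ComposerFrom(bkt.Object("part-1"), bkt.Object("part-2"))
    composer.ContentType = "text/plain" // attributes for the composed object
    attrs, err := composer.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(attrs.Size)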

+ 161 - 0
vendor/cloud.google.com/go/storage/doc.go

@@ -0,0 +1,161 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package storage provides an easy way to work with Google Cloud Storage.
+Google Cloud Storage stores data in named objects, which are grouped into buckets.
+
+More information about Google Cloud Storage is available at
+https://cloud.google.com/storage/docs.
+
+All of the methods of this package use exponential backoff to retry calls
+that fail with certain errors, as described in
+https://cloud.google.com/storage/docs/exponential-backoff.
+
+Note: This package is in beta.  Some backwards-incompatible changes may occur.
+
+
+Creating a Client
+
+To start working with this package, create a client:
+
+    ctx := context.Background()
+    client, err := storage.NewClient(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+
+Buckets
+
+A Google Cloud Storage bucket is a collection of objects. To work with a
+bucket, make a bucket handle:
+
+    bkt := client.Bucket(bucketName)
+
+A handle is a reference to a bucket. You can have a handle even if the
+bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
+call Create on the handle:
+
+    if err := bkt.Create(ctx, projectID, nil); err != nil {
+        // TODO: Handle error.
+    }
+
+Note that although buckets are associated with projects, bucket names are
+global across all projects.
+
+Each bucket has associated metadata, represented in this package by
+BucketAttrs. The third argument to BucketHandle.Create allows you to set
+the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
+Attrs:
+
+    attrs, err := bkt.Attrs(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
+        attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
+
+Objects
+
+An object holds arbitrary data as a sequence of bytes, like a file. You
+refer to objects using a handle, just as with buckets. You can use the
+standard Go io.Reader and io.Writer interfaces to read and write
+object data:
+
+    obj := bkt.Object("data")
+    // Write something to obj.
+    // w implements io.Writer.
+    w := obj.NewWriter(ctx)
+    // Write some text to obj. This will overwrite whatever is there.
+    if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
+        // TODO: Handle error.
+    }
+    // Close, just like writing a file.
+    if err := w.Close(); err != nil {
+        // TODO: Handle error.
+    }
+
+    // Read it back.
+    r, err := obj.NewReader(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    defer r.Close()
+    if _, err := io.Copy(os.Stdout, r); err != nil {
+        // TODO: Handle error.
+    }
+    // Prints "This object contains text."
+
+Objects also have attributes, which you can fetch with Attrs:
+
+    objAttrs, err := obj.Attrs(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    fmt.Printf("object %s has size %d and can be read using %s\n",
+        objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
+
+ACLs
+
+Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
+ACLRules, each of which specifies the role of a user, group or project. ACLs
+are suitable for fine-grained control, but you may prefer using IAM to control
+access at the project level (see
+https://cloud.google.com/storage/docs/access-control/iam).
+
+To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
+
+    acls, err := obj.ACL().List(ctx)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    for _, rule := range acls {
+        fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
+    }
+
+You can also set and delete ACLs.
+
+Conditions
+
+Every object has a generation and a metageneration. The generation changes
+whenever the content changes, and the metageneration changes whenever the
+metadata changes. Conditions let you check these values before an operation;
+the operation only executes if the conditions match. You can use conditions to
+prevent race conditions in read-modify-write operations.
+
+For example, say you've read an object's metadata into objAttrs. Now
+you want to write to that object, but only if its contents haven't changed
+since you read it. Here is how to express that:
+
+    w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
+    // Proceed with writing as above.
+
+Signed URLs
+
+You can obtain a URL that lets anyone read or write an object for a limited time.
+You don't need to create a client to do this. See the documentation of
+SignedURL for details.
+
+    url, err := storage.SignedURL(bucketName, "shared-object", opts)
+    if err != nil {
+        // TODO: Handle error.
+    }
+    fmt.Println(url)
+
+Authentication
+
+See examples of authorization and authentication at
+https://godoc.org/cloud.google.com/go#pkg-examples.
+*/
+package storage // import "cloud.google.com/go/storage"
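
The package documentation above stops short of listing objects. A sketch using the Query type and the object iterator from bucket.go (the BucketHandle.Objects call itself sits in the portion of bucket.go not included in this hunk, so treat it as an assumption; bkt and ctx are as in the Buckets section):

    it := bkt.Objects(ctx, &storage.Query{Prefix: "logs/", Delimiter: "/"})
    for {
        objAttrs, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        if objAttrs.Prefix != "" {
            fmt.Println("directory:", objAttrs.Prefix) // synthetic "directory entry"
            continue
        }
        fmt.Println(objAttrs.Name, objAttrs.Size)
    }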

+ 26 - 0
vendor/cloud.google.com/go/storage/go17.go

@@ -0,0 +1,26 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.7
+
+package storage
+
+import (
+	"context"
+	"net/http"
+)
+
+func withContext(r *http.Request, ctx context.Context) *http.Request {
+	return r.WithContext(ctx)
+}

+ 108 - 0
vendor/cloud.google.com/go/storage/iam.go

@@ -0,0 +1,108 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"cloud.google.com/go/iam"
+	"golang.org/x/net/context"
+	raw "google.golang.org/api/storage/v1"
+	iampb "google.golang.org/genproto/googleapis/iam/v1"
+)
+
+// IAM provides access to IAM access control for the bucket.
+func (b *BucketHandle) IAM() *iam.Handle {
+	return iam.InternalNewHandleClient(&iamClient{raw: b.c.raw}, b.name)
+}
+
+// iamClient implements the iam.client interface.
+type iamClient struct {
+	raw *raw.Service
+}
+
+func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, error) {
+	req := c.raw.Buckets.GetIamPolicy(resource)
+	setClientHeader(req.Header())
+	var rp *raw.Policy
+	var err error
+	err = runWithRetry(ctx, func() error {
+		rp, err = req.Context(ctx).Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return iamFromStoragePolicy(rp), nil
+}
+
+func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) error {
+	rp := iamToStoragePolicy(p)
+	req := c.raw.Buckets.SetIamPolicy(resource, rp)
+	setClientHeader(req.Header())
+	return runWithRetry(ctx, func() error {
+		_, err := req.Context(ctx).Do()
+		return err
+	})
+}
+
+func (c *iamClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
+	req := c.raw.Buckets.TestIamPermissions(resource, perms)
+	setClientHeader(req.Header())
+	var res *raw.TestIamPermissionsResponse
+	var err error
+	err = runWithRetry(ctx, func() error {
+		res, err = req.Context(ctx).Do()
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return res.Permissions, nil
+}
+
+func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {
+	return &raw.Policy{
+		Bindings: iamToStorageBindings(ip.Bindings),
+		Etag:     string(ip.Etag),
+	}
+}
+
+func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings {
+	var rbs []*raw.PolicyBindings
+	for _, ib := range ibs {
+		rbs = append(rbs, &raw.PolicyBindings{
+			Role:    ib.Role,
+			Members: ib.Members,
+		})
+	}
+	return rbs
+}
+
+func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy {
+	return &iampb.Policy{
+		Bindings: iamFromStorageBindings(rp.Bindings),
+		Etag:     []byte(rp.Etag),
+	}
+}
+
+func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding {
+	var ibs []*iampb.Binding
+	for _, rb := range rbs {
+		ibs = append(ibs, &iampb.Binding{
+			Role:    rb.Role,
+			Members: rb.Members,
+		})
+	}
+	return ibs
+}

+ 43 - 0
vendor/cloud.google.com/go/storage/invoke.go

@@ -0,0 +1,43 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"cloud.google.com/go/internal"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+)
+
+// runWithRetry calls the function until it returns nil or a non-retryable error, or
+// the context is done.
+func runWithRetry(ctx context.Context, call func() error) error {
+	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
+		err = call()
+		if err == nil {
+			return true, nil
+		}
+		e, ok := err.(*googleapi.Error)
+		if !ok {
+			return true, err
+		}
+		// Retry on 429 and 5xx, according to
+		// https://cloud.google.com/storage/docs/exponential-backoff.
+		if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
+			return false, nil
+		}
+		return true, err
+	})
+}
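
The calling convention for runWithRetry, repeated throughout the files in this diff, captures the response in a closure variable; the helper then retries the closure on 429 and 5xx as described above. The call and obj names below are placeholders for any prepared raw API call:

    var obj *raw.Object
    var err error
    err = runWithRetry(ctx, func() error {
        obj, err = call.Do() // e.g. a prepared *raw.ObjectsGetCall
        return err
    })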

+ 26 - 0
vendor/cloud.google.com/go/storage/not_go17.go

@@ -0,0 +1,26 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.7
+
+package storage
+
+import (
+	"net/http"
+)
+
+func withContext(r *http.Request, _ interface{}) *http.Request {
+	// In Go 1.6 and below, ignore the context.
+	return r
+}

+ 74 - 0
vendor/cloud.google.com/go/storage/reader.go

@@ -0,0 +1,74 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"fmt"
+	"hash/crc32"
+	"io"
+)
+
+var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
+
+// Reader reads a Cloud Storage object.
+// It implements io.Reader.
+type Reader struct {
+	body         io.ReadCloser
+	remain, size int64
+	contentType  string
+	checkCRC     bool   // should we check the CRC?
+	wantCRC      uint32 // the CRC32c value the server sent in the header
+	gotCRC       uint32 // running crc
+}
+
+// Close closes the Reader. It must be called when done reading.
+func (r *Reader) Close() error {
+	return r.body.Close()
+}
+
+func (r *Reader) Read(p []byte) (int, error) {
+	n, err := r.body.Read(p)
+	if r.remain != -1 {
+		r.remain -= int64(n)
+	}
+	if r.checkCRC {
+		r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n])
+		// Check CRC here. It would be natural to check it in Close, but
+		// everybody defers Close on the assumption that it doesn't return
+		// anything worth looking at.
+		if r.remain == 0 && r.gotCRC != r.wantCRC {
+			return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
+				r.gotCRC, r.wantCRC)
+		}
+	}
+	return n, err
+}
+
+// Size returns the size of the object in bytes.
+// The returned value is always the same and is not affected by
+// calls to Read or Close.
+func (r *Reader) Size() int64 {
+	return r.size
+}
+
+// Remain returns the number of bytes left to read, or -1 if unknown.
+func (r *Reader) Remain() int64 {
+	return r.remain
+}
+
+// ContentType returns the content type of the object.
+func (r *Reader) ContentType() string {
+	return r.contentType
+}

+ 1117 - 0
vendor/cloud.google.com/go/storage/storage.go

@@ -0,0 +1,1117 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"google.golang.org/api/option"
+	htransport "google.golang.org/api/transport/http"
+
+	"cloud.google.com/go/internal/optional"
+	"cloud.google.com/go/internal/version"
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+var (
+	ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
+	ErrObjectNotExist = errors.New("storage: object doesn't exist")
+)
+
+const userAgent = "gcloud-golang-storage/20151204"
+
+const (
+	// ScopeFullControl grants permissions to manage your
+	// data and permissions in Google Cloud Storage.
+	ScopeFullControl = raw.DevstorageFullControlScope
+
+	// ScopeReadOnly grants permissions to
+	// view your data in Google Cloud Storage.
+	ScopeReadOnly = raw.DevstorageReadOnlyScope
+
+	// ScopeReadWrite grants permissions to manage your
+	// data in Google Cloud Storage.
+	ScopeReadWrite = raw.DevstorageReadWriteScope
+)
+
+var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
+
+func setClientHeader(headers http.Header) {
+	headers.Set("x-goog-api-client", xGoogHeader)
+}
+
+// Client is a client for interacting with Google Cloud Storage.
+//
+// Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+type Client struct {
+	hc  *http.Client
+	raw *raw.Service
+}
+
+// NewClient creates a new Google Cloud Storage client.
+// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes.
+func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
+	o := []option.ClientOption{
+		option.WithScopes(ScopeFullControl),
+		option.WithUserAgent(userAgent),
+	}
+	opts = append(o, opts...)
+	hc, ep, err := htransport.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("dialing: %v", err)
+	}
+	rawService, err := raw.New(hc)
+	if err != nil {
+		return nil, fmt.Errorf("storage client: %v", err)
+	}
+	if ep != "" {
+		rawService.BasePath = ep
+	}
+	return &Client{
+		hc:  hc,
+		raw: rawService,
+	}, nil
+}
+
+// Close closes the Client.
+//
+// Close need not be called at program exit.
+func (c *Client) Close() error {
+	c.hc = nil
+	return nil
+}
+
+// SignedURLOptions allows you to restrict the access to the signed URL.
+type SignedURLOptions struct {
+	// GoogleAccessID represents the authorizer of the signed URL generation.
+	// It is typically the Google service account client email address from
+	// the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
+	// Required.
+	GoogleAccessID string
+
+	// PrivateKey is the Google service account private key. It is obtainable
+	// from the Google Developers Console.
+	// At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
+	// create a service account client ID or reuse one of your existing service account
+	// credentials. Click on the "Generate new P12 key" to generate and download
+	// a new private key. Once you download the P12 file, use the following command
+	// to convert it into a PEM file.
+	//
+	//    $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+	//
+	// Provide the contents of the PEM file as a byte slice.
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	PrivateKey []byte
+
+	// SignBytes is a function for implementing custom signing.
+	// If your application is running on Google App Engine, you can use appengine's internal signing function:
+	//     ctx := appengine.NewContext(request)
+	//     acc, _ := appengine.ServiceAccount(ctx)
+	//     url, err := SignedURL("bucket", "object", &SignedURLOptions{
+	//     	GoogleAccessID: acc,
+	//     	SignBytes: func(b []byte) ([]byte, error) {
+	//     		_, signedBytes, err := appengine.SignBytes(ctx, b)
+	//     		return signedBytes, err
+	//     	},
+	//     	// etc.
+	//     })
+	//
+	// Exactly one of PrivateKey or SignBytes must be non-nil.
+	SignBytes func([]byte) ([]byte, error)
+
+	// Method is the HTTP method to be used with the signed URL.
+	// Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+	// Required.
+	Method string
+
+	// Expires is the expiration time on the signed URL. It must be
+	// a datetime in the future.
+	// Required.
+	Expires time.Time
+
+	// ContentType is the content type header the client must provide
+	// to use the generated signed URL.
+	// Optional.
+	ContentType string
+
+	// Headers is a list of extension headers the client must provide
+	// in order to use the generated signed URL.
+	// Optional.
+	Headers []string
+
+	// MD5 is the base64 encoded MD5 checksum of the file.
+	// If provided, the client should provide the exact value on the request
+	// header in order to use the signed URL.
+	// Optional.
+	MD5 string
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// users access to a restricted resource for a limited time without having a
+// Google account or signing in. For more information about the signed
+// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+	if opts == nil {
+		return "", errors.New("storage: missing required SignedURLOptions")
+	}
+	if opts.GoogleAccessID == "" {
+		return "", errors.New("storage: missing required GoogleAccessID")
+	}
+	if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
+		return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
+	}
+	if opts.Method == "" {
+		return "", errors.New("storage: missing required method option")
+	}
+	if opts.Expires.IsZero() {
+		return "", errors.New("storage: missing required expires option")
+	}
+	if opts.MD5 != "" {
+		md5, err := base64.StdEncoding.DecodeString(opts.MD5)
+		if err != nil || len(md5) != 16 {
+			return "", errors.New("storage: invalid MD5 checksum")
+		}
+	}
+
+	signBytes := opts.SignBytes
+	if opts.PrivateKey != nil {
+		key, err := parseKey(opts.PrivateKey)
+		if err != nil {
+			return "", err
+		}
+		signBytes = func(b []byte) ([]byte, error) {
+			sum := sha256.Sum256(b)
+			return rsa.SignPKCS1v15(
+				rand.Reader,
+				key,
+				crypto.SHA256,
+				sum[:],
+			)
+		}
+	}
+
+	u := &url.URL{
+		Path: fmt.Sprintf("/%s/%s", bucket, name),
+	}
+
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%s\n", opts.Method)
+	fmt.Fprintf(buf, "%s\n", opts.MD5)
+	fmt.Fprintf(buf, "%s\n", opts.ContentType)
+	fmt.Fprintf(buf, "%d\n", opts.Expires.Unix())
+	if len(opts.Headers) > 0 {
+		fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n"))
+	}
+	fmt.Fprintf(buf, "%s", u.String())
+
+	b, err := signBytes(buf.Bytes())
+	if err != nil {
+		return "", err
+	}
+	encoded := base64.StdEncoding.EncodeToString(b)
+	u.Scheme = "https"
+	u.Host = "storage.googleapis.com"
+	q := u.Query()
+	q.Set("GoogleAccessId", opts.GoogleAccessID)
+	q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+	q.Set("Signature", string(encoded))
+	u.RawQuery = q.Encode()
+	return u.String(), nil
+}
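
A sketch of generating a signed URL with a PEM key, following the field documentation above; key.pem, the bucket, the object name, and the service account address are placeholders, and ioutil, time, and fmt imports are assumed:

    pemKey, err := ioutil.ReadFile("key.pem") // PEM converted from the P12 file as described above
    if err != nil {
        // TODO: Handle error.
    }
    url, err := storage.SignedURL("my-bucket", "shared-object", &storage.SignedURLOptions{
        GoogleAccessID: "xxx@developer.gserviceaccount.com",
        PrivateKey:     pemKey,
        Method:         "GET",
        Expires:        time.Now().Add(48 * time.Hour),
    })
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(url)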
+
+// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
+// Use BucketHandle.Object to get a handle.
+type ObjectHandle struct {
+	c             *Client
+	bucket        string
+	object        string
+	acl           ACLHandle
+	gen           int64 // a negative value indicates latest
+	conds         *Conditions
+	encryptionKey []byte // AES-256 key
+	userProject   string // for requester-pays buckets
+}
+
+// ACL provides access to the object's access control list.
+// This controls who can read and write this object.
+// This call does not perform any network operations.
+func (o *ObjectHandle) ACL() *ACLHandle {
+	return &o.acl
+}
+
+// Generation returns a new ObjectHandle that operates on a specific generation
+// of the object.
+// By default, the handle operates on the latest generation. Not
+// all operations work when given a specific generation; check the API
+// endpoints at https://cloud.google.com/storage/docs/json_api/ for details.
+func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
+	o2 := *o
+	o2.gen = gen
+	return &o2
+}
+
+// If returns a new ObjectHandle that applies a set of preconditions.
+// Preconditions already set on the ObjectHandle are ignored.
+// Operations on the new handle will only occur if the preconditions are
+// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
+// for more details.
+func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
+	o2 := *o
+	o2.conds = &conds
+	return &o2
+}
+
+// Key returns a new ObjectHandle that uses the supplied encryption
+// key to encrypt and decrypt the object's contents.
+//
+// Encryption key must be a 32-byte AES-256 key.
+// See https://cloud.google.com/storage/docs/encryption for details.
+func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
+	o2 := *o
+	o2.encryptionKey = encryptionKey
+	return &o2
+}
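
A sketch of supplying a customer-provided AES-256 key; generating a random key with crypto/rand here is only for illustration, a real application would load a persistent key:

    key := make([]byte, 32) // must be exactly 32 bytes
    if _, err := rand.Read(key); err != nil {
        // TODO: Handle error.
    }
    obj := client.Bucket("my-bucket").Object("secret-object").Key(key)
    // Writes now encrypt with key; later reads, Attrs and Update calls must supply the same key.
    w := obj.NewWriter(ctx)
    // ... write and Close as usual.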
+
+// Attrs returns meta information about the object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
+	if err := o.validate(); err != nil {
+		return nil, err
+	}
+	call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
+	if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
+		return nil, err
+	}
+	if o.userProject != "" {
+		call.UserProject(o.userProject)
+	}
+	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
+		return nil, err
+	}
+	var obj *raw.Object
+	var err error
+	setClientHeader(call.Header())
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return nil, ErrObjectNotExist
+	}
+	if err != nil {
+		return nil, err
+	}
+	return newObject(obj), nil
+}
+
+// Update updates an object with the provided attributes.
+// All zero-value attributes are ignored.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
+	if err := o.validate(); err != nil {
+		return nil, err
+	}
+	var attrs ObjectAttrs
+	// Lists of fields to send, and set to null, in the JSON.
+	var forceSendFields, nullFields []string
+	if uattrs.ContentType != nil {
+		attrs.ContentType = optional.ToString(uattrs.ContentType)
+		forceSendFields = append(forceSendFields, "ContentType")
+	}
+	if uattrs.ContentLanguage != nil {
+		attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
+		// For ContentLanguage It's an error to send the empty string.
+		// Instead we send a null.
+		if attrs.ContentLanguage == "" {
+			nullFields = append(nullFields, "ContentLanguage")
+		} else {
+			forceSendFields = append(forceSendFields, "ContentLanguage")
+		}
+	}
+	if uattrs.ContentEncoding != nil {
+		attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
+		forceSendFields = append(forceSendFields, "ContentEncoding")
+	}
+	if uattrs.ContentDisposition != nil {
+		attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
+		forceSendFields = append(forceSendFields, "ContentDisposition")
+	}
+	if uattrs.CacheControl != nil {
+		attrs.CacheControl = optional.ToString(uattrs.CacheControl)
+		forceSendFields = append(forceSendFields, "CacheControl")
+	}
+	if uattrs.Metadata != nil {
+		attrs.Metadata = uattrs.Metadata
+		if len(attrs.Metadata) == 0 {
+			// Sending the empty map is a no-op. We send null instead.
+			nullFields = append(nullFields, "Metadata")
+		} else {
+			forceSendFields = append(forceSendFields, "Metadata")
+		}
+	}
+	if uattrs.ACL != nil {
+		attrs.ACL = uattrs.ACL
+		// It's an error to attempt to delete the ACL, so
+		// we don't append to nullFields here.
+		forceSendFields = append(forceSendFields, "Acl")
+	}
+	rawObj := attrs.toRawObject(o.bucket)
+	rawObj.ForceSendFields = forceSendFields
+	rawObj.NullFields = nullFields
+	call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
+	if err := applyConds("Update", o.gen, o.conds, call); err != nil {
+		return nil, err
+	}
+	if o.userProject != "" {
+		call.UserProject(o.userProject)
+	}
+	if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
+		return nil, err
+	}
+	var obj *raw.Object
+	var err error
+	setClientHeader(call.Header())
+	err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
+	if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+		return nil, ErrObjectNotExist
+	}
+	if err != nil {
+		return nil, err
+	}
+	return newObject(obj), nil
+}
+
+// ObjectAttrsToUpdate is used to update the attributes of an object.
+// Only fields set to non-nil values will be updated.
+// Set a field to its zero value to delete it.
+//
+// For example, to change ContentType and delete ContentEncoding and
+// Metadata, use
+//    ObjectAttrsToUpdate{
+//        ContentType: "text/html",
+//        ContentEncoding: "",
+//        Metadata: map[string]string{},
+//    }
+type ObjectAttrsToUpdate struct {
+	ContentType        optional.String
+	ContentLanguage    optional.String
+	ContentEncoding    optional.String
+	ContentDisposition optional.String
+	CacheControl       optional.String
+	Metadata           map[string]string // set to map[string]string{} to delete
+	ACL                []ACLRule
+}
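
Completing the example in the comment above, a sketch of the corresponding Update call (obj is an ObjectHandle, ctx as usual):

    updated, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
        ContentType:     "text/html",         // set
        ContentEncoding: "",                  // delete
        Metadata:        map[string]string{}, // delete all metadata
    })
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(updated.ContentType, updated.Metageneration)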
+
+// Delete deletes the single specified object.
+func (o *ObjectHandle) Delete(ctx context.Context) error {
+	if err := o.validate(); err != nil {
+		return err
+	}
+	call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
+	if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
+		return err
+	}
+	if o.userProject != "" {
+		call.UserProject(o.userProject)
+	}
+	// Encryption doesn't apply to Delete.
+	setClientHeader(call.Header())
+	err := runWithRetry(ctx, func() error { return call.Do() })
+	switch e := err.(type) {
+	case nil:
+		return nil
+	case *googleapi.Error:
+		if e.Code == http.StatusNotFound {
+			return ErrObjectNotExist
+		}
+	}
+	return err
+}
+
+// NewReader creates a new Reader to read the contents of the
+// object.
+// ErrObjectNotExist will be returned if the object is not found.
+//
+// The caller must call Close on the returned Reader when done reading.
+func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
+	return o.NewRangeReader(ctx, 0, -1)
+}
+
+// NewRangeReader reads part of an object, reading at most length bytes
+// starting at the given offset. If length is negative, the object is read
+// until the end.
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
+	if err := o.validate(); err != nil {
+		return nil, err
+	}
+	if offset < 0 {
+		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
+	}
+	if o.conds != nil {
+		if err := o.conds.validate("NewRangeReader"); err != nil {
+			return nil, err
+		}
+	}
+	u := &url.URL{
+		Scheme:   "https",
+		Host:     "storage.googleapis.com",
+		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
+		RawQuery: conditionsQuery(o.gen, o.conds),
+	}
+	verb := "GET"
+	if length == 0 {
+		verb = "HEAD"
+	}
+	req, err := http.NewRequest(verb, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	req = withContext(req, ctx)
+	if length < 0 && offset > 0 {
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+	} else if length > 0 {
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+	}
+	if o.userProject != "" {
+		req.Header.Set("X-Goog-User-Project", o.userProject)
+	}
+	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
+		return nil, err
+	}
+	var res *http.Response
+	err = runWithRetry(ctx, func() error {
+		res, err = o.c.hc.Do(req)
+		if err != nil {
+			return err
+		}
+		if res.StatusCode == http.StatusNotFound {
+			res.Body.Close()
+			return ErrObjectNotExist
+		}
+		if res.StatusCode < 200 || res.StatusCode > 299 {
+			body, _ := ioutil.ReadAll(res.Body)
+			res.Body.Close()
+			return &googleapi.Error{
+				Code:   res.StatusCode,
+				Header: res.Header,
+				Body:   string(body),
+			}
+		}
+		if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
+			res.Body.Close()
+			return errors.New("storage: partial request not satisfied")
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var size int64 // total size of object, even if a range was requested.
+	if res.StatusCode == http.StatusPartialContent {
+		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
+		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
+			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+		}
+		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+		}
+	} else {
+		size = res.ContentLength
+	}
+
+	remain := res.ContentLength
+	body := res.Body
+	if length == 0 {
+		remain = 0
+		body.Close()
+		body = emptyBody
+	}
+	var (
+		checkCRC bool
+		crc      uint32
+	)
+	// Even if there is a CRC header, we can't compute the hash on partial data.
+	if remain == size {
+		crc, checkCRC = parseCRC32c(res)
+	}
+	return &Reader{
+		body:        body,
+		size:        size,
+		remain:      remain,
+		contentType: res.Header.Get("Content-Type"),
+		wantCRC:     crc,
+		checkCRC:    checkCRC,
+	}, nil
+}
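
A sketch of a range read using the Reader accessors defined in reader.go; the 64 KiB length is arbitrary and ioutil is assumed imported:

    r, err := obj.NewRangeReader(ctx, 0, 64*1024) // first 64 KiB only
    if err != nil {
        // TODO: Handle error.
    }
    defer r.Close()
    slurp, err := ioutil.ReadAll(r)
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Printf("read %d bytes of a %d-byte object\n", len(slurp), r.Size())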
+
+func parseCRC32c(res *http.Response) (uint32, bool) {
+	const prefix = "crc32c="
+	for _, spec := range res.Header["X-Goog-Hash"] {
+		if strings.HasPrefix(spec, prefix) {
+			c, err := decodeUint32(spec[len(prefix):])
+			if err == nil {
+				return c, true
+			}
+		}
+	}
+	return 0, false
+}
+
+var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+
+// NewWriter returns a storage Writer that writes to the GCS object
+// associated with this ObjectHandle.
+//
+// A new object will be created unless an object with this name already exists.
+// Otherwise any previous object with the same name will be replaced.
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+	return &Writer{
+		ctx:         ctx,
+		o:           o,
+		donec:       make(chan struct{}),
+		ObjectAttrs: ObjectAttrs{Name: o.object},
+		ChunkSize:   googleapi.DefaultUploadChunkSize,
+	}
+}
+
+func (o *ObjectHandle) validate() error {
+	if o.bucket == "" {
+		return errors.New("storage: bucket name is empty")
+	}
+	if o.object == "" {
+		return errors.New("storage: object name is empty")
+	}
+	if !utf8.ValidString(o.object) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+	}
+	return nil
+}
+
+// parseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+	if block, _ := pem.Decode(key); block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("oauth2: private key is invalid")
+	}
+	return parsed, nil
+}
+
+func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
+	var acl []*raw.ObjectAccessControl
+	if len(oldACL) > 0 {
+		acl = make([]*raw.ObjectAccessControl, len(oldACL))
+		for i, rule := range oldACL {
+			acl[i] = &raw.ObjectAccessControl{
+				Entity: string(rule.Entity),
+				Role:   string(rule.Role),
+			}
+		}
+	}
+	return acl
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
+	acl := toRawObjectACL(o.ACL)
+	return &raw.Object{
+		Bucket:             bucket,
+		Name:               o.Name,
+		ContentType:        o.ContentType,
+		ContentEncoding:    o.ContentEncoding,
+		ContentLanguage:    o.ContentLanguage,
+		CacheControl:       o.CacheControl,
+		ContentDisposition: o.ContentDisposition,
+		StorageClass:       o.StorageClass,
+		Acl:                acl,
+		Metadata:           o.Metadata,
+	}
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+	// Bucket is the name of the bucket containing this GCS object.
+	// This field is read-only.
+	Bucket string
+
+	// Name is the name of the object within the bucket.
+	// This field is read-only.
+	Name string
+
+	// ContentType is the MIME type of the object's content.
+	ContentType string
+
+	// ContentLanguage is the content language of the object's content.
+	ContentLanguage string
+
+	// CacheControl is the Cache-Control header to be sent in the response
+	// headers when serving the object data.
+	CacheControl string
+
+	// ACL is the list of access control rules for the object.
+	ACL []ACLRule
+
+	// Owner is the owner of the object. This field is read-only.
+	//
+	// If non-zero, it is in the form of "user-<userId>".
+	Owner string
+
+	// Size is the length of the object's content. This field is read-only.
+	Size int64
+
+	// ContentEncoding is the encoding of the object's content.
+	ContentEncoding string
+
+	// ContentDisposition is the optional Content-Disposition header of the object
+	// sent in the response headers.
+	ContentDisposition string
+
+	// MD5 is the MD5 hash of the object's content. This field is read-only.
+	MD5 []byte
+
+	// CRC32C is the CRC32 checksum of the object's content using
+	// the Castagnoli93 polynomial. This field is read-only.
+	CRC32C uint32
+
+	// MediaLink is a URL to the object's content. This field is read-only.
+	MediaLink string
+
+	// Metadata represents user-provided metadata, in key/value pairs.
+	// It can be nil if no metadata is provided.
+	Metadata map[string]string
+
+	// Generation is the generation number of the object's content.
+	// This field is read-only.
+	Generation int64
+
+	// Metageneration is the version of the metadata for this
+	// object at this generation. This field is used for preconditions
+	// and for detecting changes in metadata. A metageneration number
+	// is only meaningful in the context of a particular generation
+	// of a particular object. This field is read-only.
+	Metageneration int64
+
+	// StorageClass is the storage class of the object.
+	// This value defines how objects in the bucket are stored and
+	// determines the SLA and the cost of storage. Typical values are
+	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
+	// and "DURABLE_REDUCED_AVAILABILITY".
+	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
+	// or "REGIONAL" depending on the bucket's location settings.
+	StorageClass string
+
+	// Created is the time the object was created. This field is read-only.
+	Created time.Time
+
+	// Deleted is the time the object was deleted.
+	// If not deleted, it is the zero value. This field is read-only.
+	Deleted time.Time
+
+	// Updated is the creation or modification time of the object.
+	// For buckets with versioning enabled, changing an object's
+	// metadata does not change this property. This field is read-only.
+	Updated time.Time
+
+	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
+	// customer-supplied encryption key for the object. It is empty if there is
+	// no customer-supplied encryption key.
+	// See https://cloud.google.com/storage/docs/encryption for more about
+	// encryption in Google Cloud Storage.
+	CustomerKeySHA256 string
+
+	// Prefix is set only for ObjectAttrs which represent synthetic "directory
+	// entries" when iterating over buckets using Query.Delimiter. See
+	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
+	// populated.
+	Prefix string
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time {
+	var r time.Time
+	if t != "" {
+		r, _ = time.Parse(time.RFC3339, t)
+	}
+	return r
+}
+
+func newObject(o *raw.Object) *ObjectAttrs {
+	if o == nil {
+		return nil
+	}
+	acl := make([]ACLRule, len(o.Acl))
+	for i, rule := range o.Acl {
+		acl[i] = ACLRule{
+			Entity: ACLEntity(rule.Entity),
+			Role:   ACLRole(rule.Role),
+		}
+	}
+	owner := ""
+	if o.Owner != nil {
+		owner = o.Owner.Entity
+	}
+	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+	crc32c, _ := decodeUint32(o.Crc32c)
+	var sha256 string
+	if o.CustomerEncryption != nil {
+		sha256 = o.CustomerEncryption.KeySha256
+	}
+	return &ObjectAttrs{
+		Bucket:            o.Bucket,
+		Name:              o.Name,
+		ContentType:       o.ContentType,
+		ContentLanguage:   o.ContentLanguage,
+		CacheControl:      o.CacheControl,
+		ACL:               acl,
+		Owner:             owner,
+		ContentEncoding:   o.ContentEncoding,
+		Size:              int64(o.Size),
+		MD5:               md5,
+		CRC32C:            crc32c,
+		MediaLink:         o.MediaLink,
+		Metadata:          o.Metadata,
+		Generation:        o.Generation,
+		Metageneration:    o.Metageneration,
+		StorageClass:      o.StorageClass,
+		CustomerKeySHA256: sha256,
+		Created:           convertTime(o.TimeCreated),
+		Deleted:           convertTime(o.TimeDeleted),
+		Updated:           convertTime(o.Updated),
+	}
+}
+
+// Decode a uint32 encoded in Base64 in big-endian byte order.
+func decodeUint32(b64 string) (uint32, error) {
+	d, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return 0, err
+	}
+	if len(d) != 4 {
+		return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d)
+	}
+	return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil
+}
+
+// Encode a uint32 as Base64 in big-endian byte order.
+func encodeUint32(u uint32) string {
+	b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)}
+	return base64.StdEncoding.EncodeToString(b)
+}
+
+// Query represents a query to filter objects from a bucket.
+type Query struct {
+	// Delimiter returns results in a directory-like fashion.
+	// Results will contain only objects whose names, aside from the
+	// prefix, do not contain delimiter. Objects whose names,
+	// aside from the prefix, contain delimiter will have their name,
+	// truncated after the delimiter, returned in prefixes.
+	// Duplicate prefixes are omitted.
+	// Optional.
+	Delimiter string
+
+	// Prefix is the prefix filter to query objects
+	// whose names begin with this prefix.
+	// Optional.
+	Prefix string
+
+	// Versions indicates whether multiple versions of the same
+	// object will be included in the results.
+	Versions bool
+}
+
+// contentTyper implements ContentTyper to enable an
+// io.ReadCloser to specify its MIME type.
+type contentTyper struct {
+	io.Reader
+	t string
+}
+
+func (c *contentTyper) ContentType() string {
+	return c.t
+}
+
+// Conditions constrain methods to act on specific generations of
+// objects.
+//
+// The zero value is an empty set of constraints. Not all conditions or
+// combinations of conditions are applicable to all methods.
+// See https://cloud.google.com/storage/docs/generations-preconditions
+// for details on how these operate.
+type Conditions struct {
+	// Generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// GenerationMatch specifies that the object must have the given generation
+	// for the operation to occur.
+	// If GenerationMatch is zero, it has no effect.
+	// Use DoesNotExist to specify that the object does not exist in the bucket.
+	GenerationMatch int64
+
+	// GenerationNotMatch specifies that the object must not have the given
+	// generation for the operation to occur.
+	// If GenerationNotMatch is zero, it has no effect.
+	GenerationNotMatch int64
+
+	// DoesNotExist specifies that the object must not exist in the bucket for
+	// the operation to occur.
+	// If DoesNotExist is false, it has no effect.
+	DoesNotExist bool
+
+	// Metadata generation constraints.
+	// At most one of the following can be set to a non-zero value.
+
+	// MetagenerationMatch specifies that the object must have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationMatch is zero, it has no effect.
+	MetagenerationMatch int64
+
+	// MetagenerationNotMatch specifies that the object must not have the given
+	// metageneration for the operation to occur.
+	// If MetagenerationNotMatch is zero, it has no effect.
+	MetagenerationNotMatch int64
+}
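
One common use not covered in doc.go is create-only-if-absent, which the DoesNotExist condition expresses (applyConds below translates it to ifGenerationMatch=0); a sketch, assuming an ObjectHandle obj:

    w := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
    if _, err := w.Write([]byte("initial contents")); err != nil {
        // TODO: Handle error.
    }
    if err := w.Close(); err != nil {
        // A precondition-failure error here means the object already existed.
        // TODO: Handle error.
    }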
+
+func (c *Conditions) validate(method string) error {
+	if *c == (Conditions{}) {
+		return fmt.Errorf("storage: %s: empty conditions", method)
+	}
+	if !c.isGenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
+	}
+	if !c.isMetagenerationValid() {
+		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
+	}
+	return nil
+}
+
+func (c *Conditions) isGenerationValid() bool {
+	n := 0
+	if c.GenerationMatch != 0 {
+		n++
+	}
+	if c.GenerationNotMatch != 0 {
+		n++
+	}
+	if c.DoesNotExist {
+		n++
+	}
+	return n <= 1
+}
+
+func (c *Conditions) isMetagenerationValid() bool {
+	return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
+}
+
+// applyConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
+	cval := reflect.ValueOf(call)
+	if gen >= 0 {
+		if !setConditionField(cval, "Generation", gen) {
+			return fmt.Errorf("storage: %s: generation not supported", method)
+		}
+	}
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate(method); err != nil {
+		return err
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
+			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
+		}
+	case conds.GenerationNotMatch != 0:
+		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
+		}
+	case conds.DoesNotExist:
+		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
+			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+		}
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
+		}
+	case conds.MetagenerationNotMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
+		}
+	}
+	return nil
+}
+
+func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
+	if gen >= 0 {
+		call.SourceGeneration(gen)
+	}
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate("CopyTo source"); err != nil {
+		return err
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		call.IfSourceGenerationMatch(conds.GenerationMatch)
+	case conds.GenerationNotMatch != 0:
+		call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
+	case conds.DoesNotExist:
+		call.IfSourceGenerationMatch(0)
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
+	case conds.MetagenerationNotMatch != 0:
+		call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
+	}
+	return nil
+}
+
+// setConditionField sets a field on a *raw.WhateverCall.
+// We can't use anonymous interfaces because the return type is
+// different, since the field setters are builders.
+func setConditionField(call reflect.Value, name string, value interface{}) bool {
+	m := call.MethodByName(name)
+	if !m.IsValid() {
+		return false
+	}
+	m.Call([]reflect.Value{reflect.ValueOf(value)})
+	return true
+}
+
+// conditionsQuery returns the generation and conditions as a URL query
+// string suitable for URL.RawQuery.  It assumes that the conditions
+// have been validated.
+func conditionsQuery(gen int64, conds *Conditions) string {
+	// URL escapes are elided because integer strings are URL-safe.
+	var buf []byte
+
+	appendParam := func(s string, n int64) {
+		if len(buf) > 0 {
+			buf = append(buf, '&')
+		}
+		buf = append(buf, s...)
+		buf = strconv.AppendInt(buf, n, 10)
+	}
+
+	if gen >= 0 {
+		appendParam("generation=", gen)
+	}
+	if conds == nil {
+		return string(buf)
+	}
+	switch {
+	case conds.GenerationMatch != 0:
+		appendParam("ifGenerationMatch=", conds.GenerationMatch)
+	case conds.GenerationNotMatch != 0:
+		appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
+	case conds.DoesNotExist:
+		appendParam("ifGenerationMatch=", 0)
+	}
+	switch {
+	case conds.MetagenerationMatch != 0:
+		appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
+	case conds.MetagenerationNotMatch != 0:
+		appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
+	}
+	return string(buf)
+}
+
+// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
+// that modifyCall searches for by name.
+type composeSourceObj struct {
+	src *raw.ComposeRequestSourceObjects
+}
+
+func (c composeSourceObj) Generation(gen int64) {
+	c.src.Generation = gen
+}
+
+func (c composeSourceObj) IfGenerationMatch(gen int64) {
+	// It's safe to overwrite ObjectPreconditions, since its only field is
+	// IfGenerationMatch.
+	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
+		IfGenerationMatch: gen,
+	}
+}
+
+func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
+	if key == nil {
+		return nil
+	}
+	// TODO(jbd): Ask the API team to return a more user-friendly error
+	// and avoid doing this check at the client level.
+	if len(key) != 32 {
+		return errors.New("storage: not a 32-byte AES-256 key")
+	}
+	var cs string
+	if copySource {
+		cs = "copy-source-"
+	}
+	headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
+	headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
+	keyHash := sha256.Sum256(key)
+	headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
+	return nil
+}
+
+// TODO(jbd): Add storage.objects.watch.
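setEncryptionHeaders attaches a customer-supplied AES-256 key as three x-goog-* headers: the algorithm, the base64-encoded key, and the base64-encoded SHA-256 of the key. A self-contained sketch of the header values for a sample key (the key bytes below are placeholders, for illustration only):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"net/http"
)

func main() {
	// A 32-byte AES-256 key; a real key should come from a secure source.
	key := make([]byte, 32)
	for i := range key {
		key[i] = byte(i)
	}

	h := http.Header{}
	h.Set("x-goog-encryption-algorithm", "AES256")
	h.Set("x-goog-encryption-key", base64.StdEncoding.EncodeToString(key))
	sum := sha256.Sum256(key)
	h.Set("x-goog-encryption-key-sha256", base64.StdEncoding.EncodeToString(sum[:]))

	for _, k := range []string{"x-goog-encryption-algorithm", "x-goog-encryption-key", "x-goog-encryption-key-sha256"} {
		fmt.Printf("%s: %s\n", k, h.Get(k))
	}
}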

+ 192 - 0
vendor/cloud.google.com/go/storage/writer.go

@@ -0,0 +1,192 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"unicode/utf8"
+
+	"golang.org/x/net/context"
+	"google.golang.org/api/googleapi"
+	raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+	// ObjectAttrs are optional attributes to set on the object. Any attributes
+	// must be initialized before the first Write call. Nil or zero-valued
+	// attributes are ignored.
+	ObjectAttrs
+
+	// SendCRC32C specifies whether to transmit a CRC32C field. It should be set
+	// to true in addition to setting the Writer's CRC32C field, because zero
+	// is a valid CRC and normally a zero would not be transmitted.
+	SendCRC32C bool
+
+	// ChunkSize controls the maximum number of bytes of the object that the
+	// Writer will attempt to send to the server in a single request. Objects
+	// smaller than the size will be sent in a single request, while larger
+	// objects will be split over multiple requests. The size will be rounded up
+	// to the nearest multiple of 256K. If zero, chunking will be disabled and
+	// the object will be uploaded in a single request.
+	//
+	// ChunkSize will default to a reasonable value. Any custom configuration
+	// must be done before the first Write call.
+	ChunkSize int
+
+	// ProgressFunc can be used to monitor the progress of a large write
+	// operation. If ProgressFunc is not nil and writing requires multiple
+	// calls to the underlying service (see
+	// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
+	// then ProgressFunc will be invoked after each call with the number of bytes of
+	// content copied so far.
+	//
+	// ProgressFunc should return quickly without blocking.
+	ProgressFunc func(int64)
+
+	ctx context.Context
+	o   *ObjectHandle
+
+	opened bool
+	pw     *io.PipeWriter
+
+	donec chan struct{} // closed after err and obj are set.
+	err   error
+	obj   *ObjectAttrs
+}
+
+func (w *Writer) open() error {
+	attrs := w.ObjectAttrs
+	// Check the developer didn't change the object Name (this is unfortunate, but
+	// we don't want to store an object under the wrong name).
+	if attrs.Name != w.o.object {
+		return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+	}
+	if !utf8.ValidString(attrs.Name) {
+		return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+	}
+	pr, pw := io.Pipe()
+	w.pw = pw
+	w.opened = true
+
+	if w.ChunkSize < 0 {
+		return errors.New("storage: Writer.ChunkSize must be non-negative")
+	}
+	mediaOpts := []googleapi.MediaOption{
+		googleapi.ChunkSize(w.ChunkSize),
+	}
+	if c := attrs.ContentType; c != "" {
+		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+	}
+
+	go func() {
+		defer close(w.donec)
+
+		rawObj := attrs.toRawObject(w.o.bucket)
+		if w.SendCRC32C {
+			rawObj.Crc32c = encodeUint32(attrs.CRC32C)
+		}
+		if w.MD5 != nil {
+			rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
+		}
+		call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
+			Media(pr, mediaOpts...).
+			Projection("full").
+			Context(w.ctx)
+		if w.ProgressFunc != nil {
+			call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
+		}
+		if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
+			w.err = err
+			pr.CloseWithError(w.err)
+			return
+		}
+		var resp *raw.Object
+		err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
+		if err == nil {
+			if w.o.userProject != "" {
+				call.UserProject(w.o.userProject)
+			}
+			setClientHeader(call.Header())
+			// We will only retry here if the initial POST, which obtains a URI for
+			// the resumable upload, fails with a retryable error. The upload itself
+			// has its own retry logic.
+			err = runWithRetry(w.ctx, func() error {
+				var err2 error
+				resp, err2 = call.Do()
+				return err2
+			})
+		}
+		if err != nil {
+			w.err = err
+			pr.CloseWithError(w.err)
+			return
+		}
+		w.obj = newObject(resp)
+	}()
+	return nil
+}
+
+// Write appends to w. It implements the io.Writer interface.
+//
+// Since writes happen asynchronously, Write may return a nil
+// error even though the write failed (or will fail). Always
+// use the error returned from Writer.Close to determine if
+// the upload was successful.
+func (w *Writer) Write(p []byte) (n int, err error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return 0, err
+		}
+	}
+	return w.pw.Write(p)
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+	if !w.opened {
+		if err := w.open(); err != nil {
+			return err
+		}
+	}
+	if err := w.pw.Close(); err != nil {
+		return err
+	}
+	<-w.donec
+	return w.err
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+func (w *Writer) CloseWithError(err error) error {
+	if !w.opened {
+		return nil
+	}
+	return w.pw.CloseWithError(err)
+}
+
+// Attrs returns metadata about a successfully-written object.
+// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs {
+	return w.obj
+}

Some files were not shown because too many files changed in this diff