
Merge branch 'master' into footer

Conflicts:
	pkg/api/frontendsettings.go
Torkel Ödegaard 9 years ago
parent
commit
9a7817a271
100 changed files with 3890 additions and 2023 deletions
  1. +4 -0      .floo
  2. +13 -0     .flooignore
  3. +15 -8     .github/ISSUE_TEMPLATE.md
  4. +37 -1     CHANGELOG.md
  5. +7 -2      Godeps/Godeps.json
  6. +1127 -0   Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/api.go
  7. +12 -0     Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations.go
  8. +39 -0     Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go
  9. +149 -0    Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go
  10. +130 -0   Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/service.go
  11. +38 -0    Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
  12. +6 -2     Godeps/_workspace/src/github.com/mattn/go-sqlite3/.travis.yml
  13. +23 -4    Godeps/_workspace/src/github.com/mattn/go-sqlite3/README.md
  14. +4 -0     Godeps/_workspace/src/github.com/mattn/go-sqlite3/backup.go
  15. +336 -0   Godeps/_workspace/src/github.com/mattn/go-sqlite3/callback.go
  16. +29 -12   Godeps/_workspace/src/github.com/mattn/go-sqlite3/doc.go
  17. +0 -242   Godeps/_workspace/src/github.com/mattn/go-sqlite3/error_test.go
  18. +389 -197 Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
  19. +319 -228 Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3-binding.h
  20. +409 -56  Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3.go
  21. +0 -83    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go
  22. +13 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_fts5.go
  23. +13 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_icu.go
  24. +12 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_json1.go
  25. +14 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
  26. +63 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
  27. +23 -0    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_omit_load_extension.go
  28. +0 -1     Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_other.go
  29. +0 -947   Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_test.go
  30. +7 -10    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_test/sqltest.go
  31. +1 -1     Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_windows.go
  32. +60 -5    Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3ext.h
  33. +22 -3    README.md
  34. +3 -5     build.go
  35. +5 -2     conf/defaults.ini
  36. +5 -2     conf/sample.ini
  37. +7 -0     docker/blocks/graphite/fig
  38. +8 -0     docker/blocks/influxdb/fig
  39. +7 -1     docker/blocks/opentsdb/fig
  40. +10 -0    docker/blocks/prometheus/fig
  41. +2 -0     docs/sources/datasources/cloudwatch.md
  42. +1 -1     docs/sources/datasources/kairosdb.md
  43. +3 -3     docs/sources/datasources/opentsdb.md
  44. +33 -6    docs/sources/guides/whats-new-in-v3.md
  45. +1 -0     docs/sources/http_api/overview.md
  46. +100 -0   docs/sources/http_api/preferences.md
  47. +1 -1     docs/sources/index.md
  48. +1 -1     docs/sources/installation/configuration.md
  49. +3 -10    docs/sources/installation/debian.md
  50. +28 -3    docs/sources/installation/mac.md
  51. +4 -23    docs/sources/installation/rpm.md
  52. +1 -1     docs/sources/installation/windows.md
  53. +1 -1     karma.conf.js
  54. +2 -2     latest.json
  55. +2 -2     package.json
  56. +5 -5     packaging/publish/publish.sh
  57. +82 -32   pkg/api/cloudwatch/cloudwatch.go
  58. +11 -9    pkg/api/cloudwatch/metrics.go
  59. +6 -4     pkg/api/cloudwatch/metrics_test.go
  60. +1 -0     pkg/api/dtos/plugins.go
  61. +6 -3     pkg/api/frontendsettings.go
  62. +1 -1     pkg/api/pluginproxy/pluginproxy.go
  63. +6 -0     pkg/api/plugins.go
  64. +1 -1     pkg/api/render.go
  65. +7 -8     pkg/cmd/grafana-cli/commands/upgrade_command.go
  66. +15 -7    pkg/cmd/grafana-cli/services/services.go
  67. +2 -0     pkg/components/renderer/renderer.go
  68. +0 -2     pkg/plugins/app_plugin.go
  69. +10 -1    pkg/plugins/queries.go
  70. +2 -2     pkg/plugins/update_checker.go
  71. +3 -1     pkg/services/sqlstore/preferences.go
  72. +18 -6    pkg/setting/setting.go
  73. +4 -0     public/app/app.ts
  74. +3 -1     public/app/core/directives/metric_segment.js
  75. +29 -22   public/app/core/services/datasource_srv.js
  76. +1 -1     public/app/core/utils/datemath.ts
  77. +2 -0     public/app/core/utils/kbn.js
  78. +5 -4     public/app/features/annotations/annotations_srv.js
  79. +1 -1     public/app/features/annotations/editor_ctrl.js
  80. +1 -1     public/app/features/dashboard/dashboardSrv.js
  81. +2 -1     public/app/features/dashboard/dynamicDashboardSrv.js
  82. +1 -1     public/app/features/dashboard/partials/settings.html
  83. +15 -3    public/app/features/dashboard/rowCtrl.js
  84. +2 -2     public/app/features/dashboard/shareModalCtrl.js
  85. +1 -1     public/app/features/dashboard/timeSrv.js
  86. +11 -9    public/app/features/org/partials/newOrg.html
  87. +1 -1     public/app/features/panel/metrics_ds_selector.ts
  88. +19 -1    public/app/features/panel/panel_ctrl.ts
  89. +1 -1     public/app/features/panel/panel_directive.ts
  90. +16 -8    public/app/features/templating/templateSrv.js
  91. +11 -4    public/app/features/templating/templateValuesSrv.js
  92. +7 -0     public/app/plugins/datasource/cloudwatch/partials/config.html
  93. +11 -3    public/app/plugins/datasource/elasticsearch/datasource.js
  94. +3 -3     public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html
  95. +7 -3     public/app/plugins/datasource/influxdb/datasource.ts
  96. +1 -1     public/app/plugins/datasource/influxdb/influx_query.ts
  97. +1 -1     public/app/plugins/datasource/influxdb/partials/query.editor.html
  98. +1 -1     public/app/plugins/datasource/influxdb/partials/query.options.html
  99. +13 -0    public/app/plugins/datasource/influxdb/specs/influx_query_specs.ts
  100. +2 -1    public/app/plugins/datasource/opentsdb/config_ctrl.ts

+ 4 - 0
.floo

@@ -0,0 +1,4 @@
+{
+  "url": "https://floobits.com/raintank/grafana"
+}
+

+ 13 - 0
.flooignore

@@ -0,0 +1,13 @@
+#*
+*.o
+*.pyc
+*.pyo
+*~
+extern/
+node_modules/
+tmp/
+data/
+vendor/
+public_gen/
+dist/
+

+ 15 - 8
.github/ISSUE_TEMPLATE.md

@@ -1,12 +1,19 @@
-Thank you! For helping us make Grafana even better.
+Thank you for helping us make Grafana even better!
 
-To help us respond to your issues faster, please make sure to add as much information as possible.
+To help us respond to your issues more quickly, please make sure to add as much information as possible.
 
-If this issue is about a plugin, please open the issue in that repository.
+If this issue is about a plugin, please open the issue in that plugin's repository.
 
-Start your issues title with [Feature Request] / [Bug] / [Question] or no tag if your unsure.
+Start your issue's title with [Feature Request] / [Bug] / [Question] or no tag if you're unsure. Also, please be aware that GitHub now supports uploading of screenshots; look at the bottom of this input field.
 
-Ex
-* What grafana version are you using?
-* What datasource are you using?
-* What OS are you running grafana on?
+Please include some basic information:
+- What Grafana version are you using?
+- What datasource are you using?
+- What OS are you running grafana on?
+- What did you do?
+- What was the expected result?
+- What happened instead?
+
+If your question/bug relates to a metric query / unexpected data visualization, please include:
+- An image or text representation of your metric query
+- The raw query and response from your data source (check this in chrome dev tools network tab)

+ 37 - 1
CHANGELOG.md

@@ -1,11 +1,47 @@
+# 3.1.0 (unreleased)
+
+### Enhancements
+* **Singlestat**: Add support for range to text mappings, closes [#1319](https://github.com/grafana/grafana/issues/1319)
+* **Graph**: Adds sort order options for graph tooltip, closes  [#1189](https://github.com/grafana/grafana/issues/1189)
+* **Theme**: Add default theme to config file [#5011](https://github.com/grafana/grafana/pull/5011)
+
+# 3.0.3 Patch release (2016-05-23)
+* **Annotations**: Annotations can now use a template variable as data source, closes [#5054](https://github.com/grafana/grafana/issues/5054)
+* **Time picker**: Fixed issue with timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
+* **CloudWatch**: Support for Multiple Account by AssumeRole, closes [#3522](https://github.com/grafana/grafana/issues/3522)
+* **Singlestat**: Fixed alignment and minimum height issue, fixes [#5113](https://github.com/grafana/grafana/issues/5113), fixes [#4679](https://github.com/grafana/grafana/issues/4679)
+* **Share modal**: Fixed link when using grafana under dashboard sub url, fixes [#5109](https://github.com/grafana/grafana/issues/5109)
+* **Prometheus**: Fixed bug in query editor that caused it not to load when reloading page, fixes [#5107](https://github.com/grafana/grafana/issues/5107)
+* **Elasticsearch**: Fixed bug when template variable query returns numeric values, fixes [#5097](https://github.com/grafana/grafana/issues/5097), fixes [#5088](https://github.com/grafana/grafana/issues/5088)
+* **Logging**: Fixed issue with reading logging level value, fixes [#5079](https://github.com/grafana/grafana/issues/5079)
+* **Timepicker**: Fixed issue with timepicker and UTC when reading time from URL, fixes [#5078](https://github.com/grafana/grafana/issues/5078)
+* **Docs**: Added docs for org & user preferences HTTP API, closes [#5069](https://github.com/grafana/grafana/issues/5069)
+* **Plugin list panel**: Now shows correct enable state for apps when not enabled, fixes [#5068](https://github.com/grafana/grafana/issues/5068)
+* **Elasticsearch**: Templating & Annotation queries that use template variables are now formatted correctly, fixes [#5135](https://github.com/grafana/grafana/issues/5135)
+
+# 3.0.2 Patch release (2016-05-16)
+
+* **Templating**: Fixed issue mixing row repeat and panel repeats, fixes [#4988](https://github.com/grafana/grafana/issues/4988)
+* **Templating**: Fixed issue detecting dependencies in nested variables, fixes [#4987](https://github.com/grafana/grafana/issues/4987), fixes [#4986](https://github.com/grafana/grafana/issues/4986)
+* **Graph**: Fixed broken PNG rendering in graph panel, fixes [#5025](https://github.com/grafana/grafana/issues/5025)
+* **Graph**: Fixed broken xaxis on graph panel, fixes [#5024](https://github.com/grafana/grafana/issues/5024)
+
+* **Influxdb**: Fixes crash when hiding middle series, fixes [#5005](https://github.com/grafana/grafana/issues/5005)
+
+# 3.0.1 Stable (2016-05-11)
+
+### Bug fixes
+* **Templating**: Fixed issue with new data source variable not persisting current selected value, fixes [#4934](https://github.com/grafana/grafana/issues/4934)
+
 # 3.0.0-beta7 (2016-05-02)
 
 ### Bug fixes
 * **Dashboard title**: Fixed max dashboard title width (media query) for large screens,  fixes [#4859](https://github.com/grafana/grafana/issues/4859)
 * **Annotations**: Fixed issue with entering annotation edit view, fixes [#4857](https://github.com/grafana/grafana/issues/4857)
 * **Remove query**: Fixed issue with removing query for data sources without collapsable query editors, fixes [#4856](https://github.com/grafana/grafana/issues/4856)
-* **Graphite PNG*: Fixed issue graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
+* **Graphite PNG**: Fixed issue graphite png rendering option, fixes [#4864](https://github.com/grafana/grafana/issues/4864)
 * **InfluxDB**: Fixed issue missing plus group by iconn, fixes [#4862](https://github.com/grafana/grafana/issues/4862)
+* **Graph**: Fixes missing line mode for thresholds, fixes [#4902](https://github.com/grafana/grafana/pull/4902)
 
 ### Enhancements
 * **InfluxDB**: Added new functions moving_average and difference to query editor, closes [#4698](https://github.com/grafana/grafana/issues/4698)

+ 7 - 2
Godeps/Godeps.json

@@ -1,7 +1,6 @@
 {
 	"ImportPath": "github.com/grafana/grafana",
 	"GoVersion": "go1.5.1",
-	"GodepVersion": "v60",
 	"Packages": [
 		"./pkg/..."
 	],
@@ -125,6 +124,11 @@
 			"Comment": "v1.0.0",
 			"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
 		},
+		{
+			"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
+			"Comment": "v1.0.0",
+			"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
+		},
 		{
 			"ImportPath": "github.com/bmizerany/assert",
 			"Comment": "release.r60-6-ge17e998",
@@ -276,7 +280,8 @@
 		},
 		{
 			"ImportPath": "github.com/mattn/go-sqlite3",
-			"Rev": "e28cd440fabdd39b9520344bc26829f61db40ece"
+			"Comment": "v1.1.0-67-g7204887",
+			"Rev": "7204887cf3a42df1cfaa5505dc3a3427f6dded8b"
 		},
 		{
 			"ImportPath": "github.com/rainycape/unidecode",

+ 1127 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/api.go

@@ -0,0 +1,1127 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package sts provides a client for AWS Security Token Service.
+package sts
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a request for the AssumeRole operation.
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRole,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssumeRoleOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation.
+//
+// Important: You cannot call AssumeRole by using AWS account credentials;
+// access will be denied. You must use IAM user credentials or temporary security
+// credentials to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the Using IAM.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the Using IAM.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRole, which can be from 900 seconds (15 minutes) to 3600
+// seconds (1 hour). The default is 1 hour.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you
+// choose not to pass a policy, the temporary security credentials that are
+// returned by the operation have the permissions that are defined in the access
+// policy of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// You must also have a policy that allows you to call sts:AssumeRole.
+//
+//  Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+//  "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the Using IAM guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	err := req.Send()
+	return out, err
+}
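
The comment block above explains when AssumeRole applies (cross-account access, optionally gated by MFA); this vendored sts package appears to be pulled in for the CloudWatch "Support for Multiple Account by AssumeRole" change listed in the CHANGELOG above. As a minimal sketch only, not code from this commit, the snippet below shows how a caller might drive this client; the role ARN, session name, and MFA values are placeholder parameters, and construction of the *sts.STS client (which varies with SDK version) is assumed to happen elsewhere.

// Hypothetical usage sketch; not part of this diff.
package stsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeRole asks STS for temporary credentials for roleArn. If mfaSerial is
// non-empty, the MFA serial number and one-time code are passed along, which
// the role's trust policy can require as described above.
func assumeRole(svc *sts.STS, roleArn, sessionName, mfaSerial, mfaCode string) (*sts.Credentials, error) {
	input := &sts.AssumeRoleInput{
		RoleArn:         aws.String(roleArn),
		RoleSessionName: aws.String(sessionName),
		DurationSeconds: aws.Int64(3600), // 900 to 3600 seconds; 1 hour is the default
	}
	if mfaSerial != "" {
		input.SerialNumber = aws.String(mfaSerial)
		input.TokenCode = aws.String(mfaCode)
	}
	out, err := svc.AssumeRole(input)
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}

The returned *sts.Credentials carry the temporary access key ID, secret access key, and session token with which a per-account client can then be configured.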
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a request for the AssumeRoleWithSAML operation.
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssumeRoleWithSAMLOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithSAML, which can be up to 3600 seconds (1 hour) or until the
+// time specified in the SAML authentication response's SessionNotOnOrAfter
+// value, whichever is shorter.
+//
+// The maximum duration for a session is 1 hour, and the minimum duration is
+// 15 minutes, even if values outside this range are specified.  Optionally,
+// you can pass an IAM access policy to this operation. If you choose not to
+// pass a policy, the temporary security credentials that are returned by the
+// operation have the permissions that are defined in the access policy of the
+// role that is being assumed. If you pass a policy to this operation, the temporary
+// security credentials that are returned by the operation have the permissions
+// that are allowed by both the access policy of the role that is being assumed,
+// and the policy that you pass. This gives you a way to further restrict the
+// permissions for the resulting temporary security credentials. You cannot
+// use the passed policy to grant permissions that are in excess of those allowed
+// by the access policy of the role that is being assumed. For more information,
+// see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure
+// your SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// For more information, see the following resources:
+//
+//   About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+// in the Using IAM.   Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+// in the Using IAM.   Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+// in the Using IAM.   Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+// in the Using IAM.
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	err := req.Send()
+	return out, err
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a request for the AssumeRoleWithWebIdentity operation.
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+//  For mobile applications, we recommend that you use Amazon Cognito. You
+// can use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+//  Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs. The credentials
+// are valid for the duration that you specified when calling AssumeRoleWithWebIdentity,
+// which can be from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default,
+// the temporary security credentials are valid for 1 hour.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you
+// choose not to pass a policy, the temporary security credentials that are
+// returned by the operation have the permissions that are defined in the access
+// policy of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the Using IAM.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+//   Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual)
+// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//    Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+// This interactive website lets you walk through the process of authenticating
+// via Login with Amazon, Facebook, or Google, getting temporary security credentials,
+// and then using those credentials to make a request to AWS.   AWS SDK for
+// iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android (http://aws.amazon.com/sdkforandroid/).
+// These toolkits contain sample apps that show how to invoke the identity providers,
+// and then how to use the information from these providers to get and use temporary
+// security credentials.   Web Identity Federation with Mobile Applications
+// (http://aws.amazon.com/articles/4617974389850313). This article discusses
+// web identity federation and shows an example of how to use web identity federation
+// to get access to content in Amazon S3.
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	err := req.Send()
+	return out, err
+}
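
As with AssumeRole above, here is a short sketch (not from this commit) of how the web-identity variant might be called: the caller supplies an ID token obtained from the identity provider instead of AWS credentials. roleArn, sessionName, and idToken are placeholders.

// Hypothetical usage sketch; not part of this diff.
package stsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeRoleWithWebIdentity exchanges an OpenID Connect ID token (or OAuth 2.0
// access token) from a web identity provider for temporary credentials.
func assumeRoleWithWebIdentity(svc *sts.STS, roleArn, sessionName, idToken string) (*sts.Credentials, error) {
	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String(roleArn),
		RoleSessionName:  aws.String(sessionName),
		WebIdentityToken: aws.String(idToken),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}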
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a request for the DecodeAuthorizationMessage operation.
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	op := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &DecodeAuthorizationMessageOutput{}
+	req.Data = output
+	return
+}
+
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+//  Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.  The message is encoded because
+// the details of the authorization status can constitute privileged information
+// that the user who requested the action should not see. To decode an authorization
+// status message, a user must be granted permissions via an IAM policy to request
+// the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.
+//
+// The decoded message includes the following type of information:
+//
+//  Whether the request was denied due to an explicit deny or due to the absence
+// of an explicit allow. For more information, see Determining Whether a Request
+// is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+// in the Using IAM.  The principal who made the request. The requested action.
+// The requested resource. The values of condition keys in the context of the
+// user's request.
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	err := req.Send()
+	return out, err
+}
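
A hedged sketch (not part of this diff) of the decode flow described above: given the encoded message returned with a Client.UnauthorizedOperation response, return the decoded XML document. The caller is assumed to have been granted sts:DecodeAuthorizationMessage.

// Hypothetical usage sketch; not part of this diff.
package stsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// decodeAuthMessage turns the encoded message returned alongside an HTTP 403
// into the decoded XML document describing why the request was denied.
func decodeAuthMessage(svc *sts.STS, encoded string) (string, error) {
	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String(encoded),
	})
	if err != nil {
		return "", err
	}
	if out.DecodedMessage == nil {
		return "", nil
	}
	return *out.DecodedMessage, nil
}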
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a request for the GetFederationToken operation.
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetFederationTokenOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+//
+//   If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//  The GetFederationToken action must be called by using the long-term AWS
+// security credentials of an IAM user. You can also call GetFederationToken
+// using the security credentials of an AWS account (root), but this is not
+// recommended. Instead, we recommend that you create an IAM user for the purpose
+// of the proxy application and then attach a policy to the IAM user that limits
+// federated users to only the actions and resources they need access to. For
+// more information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the Using IAM.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, between
+// 900 seconds (15 minutes) and 129600 seconds (36 hours). Temporary credentials
+// that are obtained by using AWS account (root) credentials have a maximum
+// duration of 3600 seconds (1 hour)
+//
+//  Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+//  The policy or policies that are attached to the IAM user whose credentials
+// are used to call GetFederationToken. The policy that is passed as a parameter
+// in the call.  The passed policy is attached to the temporary security credentials
+// that result from the GetFederationToken API call--that is, to the federated
+// user. When the federated user makes an AWS request, AWS evaluates the policy
+// attached to the federated user in combination with the policy or policies
+// attached to the IAM user whose credentials were used to call GetFederationToken.
+// AWS allows the federated user's request only when both the federated user
+// and the IAM user are explicitly allowed to perform the requested action.
+// The passed policy cannot grant more permissions than those that are defined
+// in the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+	req, out := c.GetFederationTokenRequest(input)
+	err := req.Send()
+	return out, err
+}
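
The permissions model above (the intersection of the calling IAM user's policy and the passed policy) is easiest to see with a scoped-down inline policy. A minimal sketch, not from this commit: the field names follow the standard GetFederationTokenInput shape (Name, Policy, DurationSeconds), presumably defined further down in this generated file beyond the portion shown, and the read-only CloudWatch policy is a made-up example.

// Hypothetical usage sketch; not part of this diff.
package stsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// getFederatedCreds requests credentials for a named federated user and scopes
// them down with an inline policy; the effective permissions are the
// intersection of this policy and the calling IAM user's own policy.
func getFederatedCreds(svc *sts.STS, userName string) (*sts.Credentials, error) {
	// Example read-only CloudWatch policy (illustrative only).
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
		`"Action":["cloudwatch:ListMetrics","cloudwatch:GetMetricStatistics"],"Resource":"*"}]}`
	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String(userName),
		Policy:          aws.String(policy),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}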
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a request for the GetSessionToken operation.
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetSessionToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetSessionTokenInput{}
+	}
+
+	req = c.newRequest(op, input, output)
+	output = &GetSessionTokenOutput{}
+	req.Data = output
+	return
+}
+
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, between 900 seconds
+// (15 minutes) and 129600 seconds (36 hours); credentials that are created
+// by using account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+//  We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+//  The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with account or
+// IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the Using IAM.
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	err := req.Send()
+	return out, err
+}
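
To round out the operations above, a minimal sketch (not from this commit) of the MFA-protected GetSessionToken call: an IAM user submits the serial number of their MFA device and the current one-time code, and the returned session credentials can then call MFA-protected APIs. The input field names (SerialNumber, TokenCode, DurationSeconds) follow the standard GetSessionTokenInput shape, presumably defined further down in this generated file.

// Hypothetical usage sketch; not part of this diff.
package stsexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// getMFASessionToken requests session credentials for an IAM user, passing the
// code from the user's MFA device so the credentials satisfy MFA conditions.
func getMFASessionToken(svc *sts.STS, mfaSerial, tokenCode string) (*sts.Credentials, error) {
	out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
		SerialNumber:    aws.String(mfaSerial),
		TokenCode:       aws.String(tokenCode),
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		return nil, err
	}
	return out.Credentials, nil
}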
+
+type AssumeRoleInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the Using IAM.
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+	// You cannot use the passed policy to grant permissions that are in excess
+	// of those allowed by the access policy of the role that is being assumed.
+	// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+	// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the role to assume.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session.
+	//
+	// Use the role session name to uniquely identify a session when the same role
+	// is assumed by different principals or for different reasons. In cross-account
+	// scenarios, the role session name is visible to, and can be logged by the
+	// account that owns the role. The role session name is also used in the ARN
+	// of the assumed role principal. This means that subsequent cross-account API
+	// requests using the temporary security credentials will expose the role session
+	// name to the external account in their CloudTrail logs.
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	TokenCode *string `min:"6" type:"string"`
+
+	metadataAssumeRoleInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+type AssumeRoleOutput struct {
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	metadataAssumeRoleOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+	return s.String()
+}
+
+type AssumeRoleWithSAMLInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds. An expiration can also be specified in the SAML authentication
+	// response's SessionNotOnOrAfter value. The actual expiration time is whichever
+	// value is shorter.
+	//
+	// The maximum duration for a session is 1 hour, and the minimum duration is
+	// 15 minutes, even if values outside this range are specified.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+
+	metadataAssumeRoleWithSAMLInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithSAMLInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithSAMLOutput struct {
+	// The identifiers for the temporary security credentials that the operation
+	// returns.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The value of the Recipient attribute of the SubjectConfirmationData element
+	// of the SAML assertion.
+	Audience *string `type:"string"`
+
+	// AWS credentials for API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	// The value of the Issuer element of the SAML assertion.
+	Issuer *string `type:"string"`
+
+	// A hash value based on the concatenation of the Issuer response value, the
+	// AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+	// provider in IAM. The combination of NameQualifier and Subject can be used
+	// to uniquely identify a federated user.
+	//
+	// The following pseudocode shows how the hash value is calculated:
+	//
+	//  BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+	// ) )
+	NameQualifier *string `type:"string"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	// The value of the NameID element in the Subject element of the SAML assertion.
+	Subject *string `type:"string"`
+
+	// The format of the name ID, as defined by the Format attribute in the NameID
+	// element of the SAML assertion. Typical examples of the format are transient
+	// or persistent.
+	//
+	//  If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+	// that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+	// is returned as transient. If the format includes any other prefix, the format
+	// is returned with no modifications.
+	SubjectType *string `type:"string"`
+
+	metadataAssumeRoleWithSAMLOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithSAMLOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+	return s.String()
+}
+
+type AssumeRoleWithWebIdentityInput struct {
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// An IAM policy in JSON format.
+	//
+	// The policy parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both the access policy of the role that is being assumed,
+	// and the policy that you pass. This gives you a way to further restrict the
+	// permissions for the resulting temporary security credentials. You cannot
+	// use the passed policy to grant permissions that are in excess of those allowed
+	// by the access policy of the role that is being assumed. For more information,
+	// see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the Using IAM.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The fully qualified host component of the domain name of the identity provider.
+	//
+	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+	// and graph.facebook.com are the only supported identity providers for OAuth
+	// 2.0 access tokens. Do not include URL schemes and port numbers.
+	//
+	// Do not specify this value for OpenID Connect ID tokens.
+	ProviderId *string `min:"4" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// An identifier for the assumed role session. Typically, you pass the name
+	// or identifier that is associated with the user who is using your application.
+	// That way, the temporary security credentials that your application will use
+	// are associated with that user. This session name is included as part of the
+	// ARN and assumed role ID in the AssumedRoleUser response element.
+	RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+	// The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+	// the identity provider. Your application must get this token by authenticating
+	// the user who is using your application with a web identity provider before
+	// the application makes an AssumeRoleWithWebIdentity call.
+	WebIdentityToken *string `min:"4" type:"string" required:"true"`
+
+	metadataAssumeRoleWithWebIdentityInput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithWebIdentityInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+type AssumeRoleWithWebIdentityOutput struct {
+	// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+	// that you can use to refer to the resulting temporary security credentials.
+	// For example, you can reference these credentials as a principal in a resource-based
+	// policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+	// that you specified when you called AssumeRole.
+	AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+	// The intended audience (also known as client ID) of the web identity token.
+	// This is traditionally the client identifier issued to the application that
+	// requested the web identity token.
+	Audience *string `type:"string"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security token.
+	Credentials *Credentials `type:"structure"`
+
+	// A percentage value that indicates the size of the policy in packed form.
+	// The service rejects any policy with a packed size greater than 100 percent,
+	// which means the policy exceeded the allowed space.
+	PackedPolicySize *int64 `type:"integer"`
+
+	// The issuing authority of the web identity token presented. For OpenID Connect
+	// ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+	// tokens, this contains the value of the ProviderId parameter that was passed
+	// in the AssumeRoleWithWebIdentity request.
+	Provider *string `type:"string"`
+
+	// The unique user identifier that is returned by the identity provider. This
+	// identifier is associated with the WebIdentityToken that was submitted with
+	// the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+	// the user and the application that acquired the WebIdentityToken (pairwise
+	// identifier). For OpenID Connect ID tokens, this field contains the value
+	// returned by the identity provider as the token's sub (Subject) claim.
+	SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+
+	metadataAssumeRoleWithWebIdentityOutput `json:"-" xml:"-"`
+}
+
+type metadataAssumeRoleWithWebIdentityOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+	return s.String()
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+type AssumedRoleUser struct {
+	// The ARN of the temporary security credentials that are returned from the
+	// AssumeRole action. For more information about ARNs and how to use them in
+	// policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// in Using IAM.
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// A unique identifier that contains the role ID and the role session name of
+	// the role that is being assumed. The role ID is generated by AWS when the
+	// role is created.
+	AssumedRoleId *string `min:"2" type:"string" required:"true"`
+
+	metadataAssumedRoleUser `json:"-" xml:"-"`
+}
+
+type metadataAssumedRoleUser struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+	return s.String()
+}
+
+// AWS credentials for API authentication.
+type Credentials struct {
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey *string `type:"string" required:"true"`
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken *string `type:"string" required:"true"`
+
+	metadataCredentials `json:"-" xml:"-"`
+}
+
+type metadataCredentials struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+	return s.String()
+}
+
+type DecodeAuthorizationMessageInput struct {
+	// The encoded message that was returned with the response.
+	EncodedMessage *string `min:"1" type:"string" required:"true"`
+
+	metadataDecodeAuthorizationMessageInput `json:"-" xml:"-"`
+}
+
+type metadataDecodeAuthorizationMessageInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+	return s.String()
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+type DecodeAuthorizationMessageOutput struct {
+	// An XML document that contains the decoded message. For more information,
+	// see DecodeAuthorizationMessage.
+	DecodedMessage *string `type:"string"`
+
+	metadataDecodeAuthorizationMessageOutput `json:"-" xml:"-"`
+}
+
+type metadataDecodeAuthorizationMessageOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+	return s.String()
+}
+
+// Identifiers for the federated user that is associated with the credentials.
+type FederatedUser struct {
+	// The ARN that specifies the federated user that is associated with the credentials.
+	// For more information about ARNs and how to use them in policies, see IAM
+	// Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+	// in Using IAM.
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The string that identifies the federated user associated with the credentials,
+	// similar to the unique ID of an IAM user.
+	FederatedUserId *string `min:"2" type:"string" required:"true"`
+
+	metadataFederatedUser `json:"-" xml:"-"`
+}
+
+type metadataFederatedUser struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+	return s.String()
+}
+
+type GetFederationTokenInput struct {
+	// The duration, in seconds, that the session should last. Acceptable durations
+	// for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+	// (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+	// using AWS account (root) credentials are restricted to a maximum of 3600
+	// seconds (one hour). If the specified duration is longer than one hour, the
+	// session obtained by using AWS account (root) credentials defaults to one
+	// hour.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// The name of the federated user. The name is used as an identifier for the
+	// temporary security credentials (such as Bob). For example, you can reference
+	// the federated user name in a resource-based policy, such as in an Amazon
+	// S3 bucket policy.
+	Name *string `min:"2" type:"string" required:"true"`
+
+	// An IAM policy in JSON format that is passed with the GetFederationToken call
+	// and evaluated along with the policy or policies that are attached to the
+	// IAM user whose credentials are used to call GetFederationToken. The passed
+	// policy is used to scope down the permissions that are available to the IAM
+	// user, by allowing only a subset of the permissions that are granted to the
+	// IAM user. The passed policy cannot grant more permissions than those granted
+	// to the IAM user. The final permissions for the federated user are the most
+	// restrictive set based on the intersection of the passed policy and the IAM
+	// user policy.
+	//
+	// If you do not pass a policy, the resulting temporary security credentials
+	// have no effective permissions. The only exception is when the temporary security
+	// credentials are used to access a resource that has a resource-based policy
+	// that specifically allows the federated user to access the resource.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.  For more information about how permissions work, see Permissions for
+	// GetFederationToken (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+	Policy *string `min:"1" type:"string"`
+
+	metadataGetFederationTokenInput `json:"-" xml:"-"`
+}
+
+type metadataGetFederationTokenInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetFederationTokenOutput struct {
+	// Credentials for the service API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	// Identifiers for the federated user associated with the credentials (such
+	// as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+	// can use the federated user's ARN in your resource-based policies, such as
+	// an Amazon S3 bucket policy.
+	FederatedUser *FederatedUser `type:"structure"`
+
+	// A percentage value indicating the size of the policy in packed form. The
+	// service rejects policies for which the packed size is greater than 100 percent
+	// of the allowed value.
+	PackedPolicySize *int64 `type:"integer"`
+
+	metadataGetFederationTokenOutput `json:"-" xml:"-"`
+}
+
+type metadataGetFederationTokenOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+	return s.String()
+}
+
+type GetSessionTokenInput struct {
+	// The duration, in seconds, that the credentials should remain valid. Acceptable
+	// durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+	// seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+	// for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+	// If the duration is longer than one hour, the session for AWS account owners
+	// defaults to one hour.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// The identification number of the MFA device that is associated with the IAM
+	// user who is making the GetSessionToken call. Specify this value if the IAM
+	// user has a policy that requires MFA authentication. The value is either the
+	// serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+	// Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	// You can find the device for an IAM user by going to the AWS Management Console
+	// and viewing the user's security credentials.
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if MFA is required. If any policy requires
+	// the IAM user to submit an MFA code, specify this value. If MFA authentication
+	// is required, and the user does not provide a code when requesting a set of
+	// temporary security credentials, the user will receive an "access denied"
+	// response when requesting resources that require MFA authentication.
+	TokenCode *string `min:"6" type:"string"`
+
+	metadataGetSessionTokenInput `json:"-" xml:"-"`
+}
+
+type metadataGetSessionTokenInput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+	return s.String()
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+type GetSessionTokenOutput struct {
+	// The session credentials for API authentication.
+	Credentials *Credentials `type:"structure"`
+
+	metadataGetSessionTokenOutput `json:"-" xml:"-"`
+}
+
+type metadataGetSessionTokenOutput struct {
+	SDKShapeTraits bool `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+	return s.String()
+}
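
Every successful call in the API above ultimately hands back a Credentials value (AccessKeyId, SecretAccessKey, SessionToken, Expiration). As a rough sketch only, not part of the vendored patch, those three secret fields can be copied into a static provider from the same SDK; the role ARN and session name below are placeholders.

    // Sketch only; assumes these imports from the vendored SDK:
    //   "github.com/aws/aws-sdk-go/aws"
    //   "github.com/aws/aws-sdk-go/aws/credentials"
    //   "github.com/aws/aws-sdk-go/service/sts"
    func configFromAssumedRole(stsClient *sts.STS, roleARN string) (*aws.Config, error) {
        out, err := stsClient.AssumeRole(&sts.AssumeRoleInput{
            RoleArn:         aws.String(roleARN),           // placeholder ARN supplied by the caller
            RoleSessionName: aws.String("example-session"), // placeholder session name
        })
        if err != nil {
            return nil, err
        }
        c := out.Credentials
        // The three secret fields become a static provider; out.Credentials.Expiration
        // tells the caller when the temporary credentials stop working.
        static := credentials.NewStaticCredentials(
            aws.StringValue(c.AccessKeyId),
            aws.StringValue(c.SecretAccessKey),
            aws.StringValue(c.SessionToken),
        )
        return aws.NewConfig().WithCredentials(static), nil
    }

A static provider never refreshes itself, so callers are expected to repeat the AssumeRole call before the Expiration timestamp.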

+ 12 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations.go

@@ -0,0 +1,12 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+	initRequest = func(r *request.Request) {
+		switch r.Operation.Name {
+		case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+			r.Handlers.Sign.Clear() // these operations are unsigned
+		}
+	}
+}
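
The init hook above removes the Sign handler for AssumeRoleWithSAML and AssumeRoleWithWebIdentity, which is what lets a caller exchange an identity-provider token before it holds any AWS credentials. A hedged sketch of that flow (the region, role ARN, and token variable are placeholders, not taken from the diff):

    // Sketch only; "token" stands in for a JWT obtained from the identity provider.
    svc := sts.New(session.New(&aws.Config{
        Region:      aws.String("us-east-1"),          // any activated region works
        Credentials: credentials.AnonymousCredentials, // no signing needed for this call
    }))
    resp, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
        RoleSessionName:  aws.String("web-identity-session"),
        WebIdentityToken: aws.String(token),
    })
    if err == nil {
        fmt.Println(*resp.Credentials.Expiration) // temporary credentials, no long-lived secret involved
    }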

+ 39 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/customizations_test.go

@@ -0,0 +1,39 @@
+package sts_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/awstesting/unit"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+var svc = sts.New(unit.Session, &aws.Config{
+	Region: aws.String("mock-region"),
+})
+
+func TestUnsignedRequest_AssumeRoleWithSAML(t *testing.T) {
+	req, _ := svc.AssumeRoleWithSAMLRequest(&sts.AssumeRoleWithSAMLInput{
+		PrincipalArn:  aws.String("ARN01234567890123456789"),
+		RoleArn:       aws.String("ARN01234567890123456789"),
+		SAMLAssertion: aws.String("ASSERT"),
+	})
+
+	err := req.Sign()
+	assert.NoError(t, err)
+	assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
+}
+
+func TestUnsignedRequest_AssumeRoleWithWebIdentity(t *testing.T) {
+	req, _ := svc.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		RoleArn:          aws.String("ARN01234567890123456789"),
+		RoleSessionName:  aws.String("SESSION"),
+		WebIdentityToken: aws.String("TOKEN"),
+	})
+
+	err := req.Sign()
+	assert.NoError(t, err)
+	assert.Equal(t, "", req.HTTPRequest.Header.Get("Authorization"))
+}

+ 149 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/examples_test.go

@@ -0,0 +1,149 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package sts_test
+
+import (
+	"bytes"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+var _ time.Duration
+var _ bytes.Buffer
+
+func ExampleSTS_AssumeRole() {
+	svc := sts.New(session.New())
+
+	params := &sts.AssumeRoleInput{
+		RoleArn:         aws.String("arnType"),             // Required
+		RoleSessionName: aws.String("roleSessionNameType"), // Required
+		DurationSeconds: aws.Int64(1),
+		ExternalId:      aws.String("externalIdType"),
+		Policy:          aws.String("sessionPolicyDocumentType"),
+		SerialNumber:    aws.String("serialNumberType"),
+		TokenCode:       aws.String("tokenCodeType"),
+	}
+	resp, err := svc.AssumeRole(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleSTS_AssumeRoleWithSAML() {
+	svc := sts.New(session.New())
+
+	params := &sts.AssumeRoleWithSAMLInput{
+		PrincipalArn:    aws.String("arnType"),           // Required
+		RoleArn:         aws.String("arnType"),           // Required
+		SAMLAssertion:   aws.String("SAMLAssertionType"), // Required
+		DurationSeconds: aws.Int64(1),
+		Policy:          aws.String("sessionPolicyDocumentType"),
+	}
+	resp, err := svc.AssumeRoleWithSAML(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleSTS_AssumeRoleWithWebIdentity() {
+	svc := sts.New(session.New())
+
+	params := &sts.AssumeRoleWithWebIdentityInput{
+		RoleArn:          aws.String("arnType"),             // Required
+		RoleSessionName:  aws.String("roleSessionNameType"), // Required
+		WebIdentityToken: aws.String("clientTokenType"),     // Required
+		DurationSeconds:  aws.Int64(1),
+		Policy:           aws.String("sessionPolicyDocumentType"),
+		ProviderId:       aws.String("urlType"),
+	}
+	resp, err := svc.AssumeRoleWithWebIdentity(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleSTS_DecodeAuthorizationMessage() {
+	svc := sts.New(session.New())
+
+	params := &sts.DecodeAuthorizationMessageInput{
+		EncodedMessage: aws.String("encodedMessageType"), // Required
+	}
+	resp, err := svc.DecodeAuthorizationMessage(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleSTS_GetFederationToken() {
+	svc := sts.New(session.New())
+
+	params := &sts.GetFederationTokenInput{
+		Name:            aws.String("userNameType"), // Required
+		DurationSeconds: aws.Int64(1),
+		Policy:          aws.String("sessionPolicyDocumentType"),
+	}
+	resp, err := svc.GetFederationToken(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}
+
+func ExampleSTS_GetSessionToken() {
+	svc := sts.New(session.New())
+
+	params := &sts.GetSessionTokenInput{
+		DurationSeconds: aws.Int64(1),
+		SerialNumber:    aws.String("serialNumberType"),
+		TokenCode:       aws.String("tokenCodeType"),
+	}
+	resp, err := svc.GetSessionToken(params)
+
+	if err != nil {
+		// Print the error, cast err to awserr.Error to get the Code and
+		// Message from an error.
+		fmt.Println(err.Error())
+		return
+	}
+
+	// Pretty-print the response data.
+	fmt.Println(resp)
+}

+ 130 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/service.go

@@ -0,0 +1,130 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+	"github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+//  As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//  For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in Using IAM.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at
+// http://aws.amazon.com/documentation/.
+//
+//  Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available,
+// but must first be activated in the AWS Management Console before you can
+// use a different region's endpoint. For more information about activating
+// a region for STS, see Activating STS in a New Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in Using IAM.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+//  Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "sts"
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBack(v4.Sign)
+	svc.Handlers.Build.PushBack(query.Build)
+	svc.Handlers.Unmarshal.PushBack(query.Unmarshal)
+	svc.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta)
+	svc.Handlers.UnmarshalError.PushBack(query.UnmarshalError)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
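
The package comment notes that STS defaults to the global https://sts.amazonaws.com endpoint and that regional endpoints must be activated first. Assuming a region has been activated, the same New constructor accepts the override through aws.Config; the region and endpoint values below are purely illustrative:

    // Sketch only; mirrors the style of the New() example in the package comment.
    svc := sts.New(session.New(), aws.NewConfig().
        WithRegion("eu-west-1").
        WithEndpoint("https://sts.eu-west-1.amazonaws.com"))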

+ 38 - 0
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go

@@ -0,0 +1,38 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package stsiface provides an interface for the AWS Security Token Service.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI is the interface type for sts.STS.
+type STSAPI interface {
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
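
Since every operation is mirrored on STSAPI, code under test can depend on the interface rather than the concrete client and swap in a double. A minimal sketch of that pattern, with the mock type and canned output invented for illustration:

    // Sketch only; mockSTS and its canned output are hypothetical test helpers.
    type mockSTS struct {
        stsiface.STSAPI                // embed the interface; un-overridden methods panic (nil interface) if called
        out *sts.GetSessionTokenOutput // canned response for the one method we override
    }

    func (m *mockSTS) GetSessionToken(in *sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error) {
        return m.out, nil
    }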

+ 6 - 2
Godeps/_workspace/src/github.com/mattn/go-sqlite3/.travis.yml

@@ -1,9 +1,13 @@
 language: go
+sudo: required
+dist: trusty
 go:
+  - 1.5
+  - 1.6
   - tip
 before_install:
-  - go get github.com/axw/gocov/gocov
   - go get github.com/mattn/goveralls
   - go get golang.org/x/tools/cmd/cover
 script:
-    - $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
+  - $HOME/gopath/bin/goveralls -repotoken 3qJVUE0iQwqnCbmNcDsjYu1nh4J4KIFXx
+  - go test -v . -tags "libsqlite3"

+ 23 - 4
Godeps/_workspace/src/github.com/mattn/go-sqlite3/README.md

@@ -1,8 +1,9 @@
 go-sqlite3
 ==========
 
-[![Build Status](https://travis-ci.org/mattn/go-sqlite3.png?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
-[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.png?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
+[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
+[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
+[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
 
 Description
 -----------
@@ -16,6 +17,10 @@ This package can be installed with the go get command:
 
     go get github.com/mattn/go-sqlite3
     
+_go-sqlite3_ is a *cgo* package.
+If you want to build your app using go-sqlite3, you need gcc.
+However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you no longer need gcc to build your app.
+    
 Documentation
 -------------
 
@@ -26,6 +31,20 @@ Examples can be found under the `./_example` directory
 FAQ
 ---
 
+* Want to build go-sqlite3 with libsqlite3 on Linux.
+
+    Use `go build --tags "libsqlite3 linux"`
+
+* Want to build go-sqlite3 with libsqlite3 on OS X.
+
+    Install sqlite3 from homebrew: `brew install sqlite3`
+
+    Use `go build --tags "libsqlite3 darwin"`
+
+* Want to build go-sqlite3 with the icu extension.
+
+    Use `go build --tags "icu"`
+
 * Can't build go-sqlite3 on windows 64bit.
 
     > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit. 
@@ -36,7 +55,7 @@ FAQ
     > You can pass some arguments into the connection string, for example, a URI.
     > See: https://github.com/mattn/go-sqlite3/issues/39
 
-* Do you want cross compiling? mingw on Linux or Mac?
+* Do you want to cross compile? mingw on Linux or Mac?
 
     > See: https://github.com/mattn/go-sqlite3/issues/106
     > See also: http://www.limitlessfx.com/cross-compile-golang-app-for-windows-from-linux.html
@@ -54,7 +73,7 @@ sqlite3-binding.c, sqlite3-binding.h, sqlite3ext.h
 
 The -binding suffix was added to avoid build failures under gccgo.
 
-In this repository, those files are amalgamation code that copied from SQLite3. The license of those codes are depend on the license of SQLite3.
+In this repository, those files are an amalgamation of code that was copied from SQLite3. The license of that code is the same as the license of SQLite3.
 
 Author
 ------

+ 4 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/backup.go

@@ -6,7 +6,11 @@
 package sqlite3
 
 /*
+#ifndef USE_LIBSQLITE3
 #include <sqlite3-binding.h>
+#else
+#include <sqlite3.h>
+#endif
 #include <stdlib.h>
 */
 import "C"

+ 336 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/callback.go

@@ -0,0 +1,336 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package sqlite3
+
+// You can't export a Go function to C and have definitions in the C
+// preamble in the same file, so we have to have callbackTrampoline in
+// its own file. Because we need a separate file anyway, the support
+// code for SQLite custom functions is in here.
+
+/*
+#include <sqlite3-binding.h>
+#include <stdlib.h>
+
+void _sqlite3_result_text(sqlite3_context* ctx, const char* s);
+void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l);
+*/
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sync"
+	"unsafe"
+)
+
+//export callbackTrampoline
+func callbackTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
+	args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
+	fi := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*functionInfo)
+	fi.Call(ctx, args)
+}
+
+//export stepTrampoline
+func stepTrampoline(ctx *C.sqlite3_context, argc int, argv **C.sqlite3_value) {
+	args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
+	ai := lookupHandle(uintptr(C.sqlite3_user_data(ctx))).(*aggInfo)
+	ai.Step(ctx, args)
+}
+
+//export doneTrampoline
+func doneTrampoline(ctx *C.sqlite3_context) {
+	handle := uintptr(C.sqlite3_user_data(ctx))
+	ai := lookupHandle(handle).(*aggInfo)
+	ai.Done(ctx)
+}
+
+// Use handles to avoid passing Go pointers to C.
+
+type handleVal struct {
+	db  *SQLiteConn
+	val interface{}
+}
+
+var handleLock sync.Mutex
+var handleVals = make(map[uintptr]handleVal)
+var handleIndex uintptr = 100
+
+func newHandle(db *SQLiteConn, v interface{}) uintptr {
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	i := handleIndex
+	handleIndex++
+	handleVals[i] = handleVal{db, v}
+	return i
+}
+
+func lookupHandle(handle uintptr) interface{} {
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	r, ok := handleVals[handle]
+	if !ok {
+		if handle >= 100 && handle < handleIndex {
+			panic("deleted handle")
+		} else {
+			panic("invalid handle")
+		}
+	}
+	return r.val
+}
+
+func deleteHandles(db *SQLiteConn) {
+	handleLock.Lock()
+	defer handleLock.Unlock()
+	for handle, val := range handleVals {
+		if val.db == db {
+			delete(handleVals, handle)
+		}
+	}
+}
+
+// This is only here so that tests can refer to it.
+type callbackArgRaw C.sqlite3_value
+
+type callbackArgConverter func(*C.sqlite3_value) (reflect.Value, error)
+
+type callbackArgCast struct {
+	f   callbackArgConverter
+	typ reflect.Type
+}
+
+func (c callbackArgCast) Run(v *C.sqlite3_value) (reflect.Value, error) {
+	val, err := c.f(v)
+	if err != nil {
+		return reflect.Value{}, err
+	}
+	if !val.Type().ConvertibleTo(c.typ) {
+		return reflect.Value{}, fmt.Errorf("cannot convert %s to %s", val.Type(), c.typ)
+	}
+	return val.Convert(c.typ), nil
+}
+
+func callbackArgInt64(v *C.sqlite3_value) (reflect.Value, error) {
+	if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
+		return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
+	}
+	return reflect.ValueOf(int64(C.sqlite3_value_int64(v))), nil
+}
+
+func callbackArgBool(v *C.sqlite3_value) (reflect.Value, error) {
+	if C.sqlite3_value_type(v) != C.SQLITE_INTEGER {
+		return reflect.Value{}, fmt.Errorf("argument must be an INTEGER")
+	}
+	i := int64(C.sqlite3_value_int64(v))
+	val := false
+	if i != 0 {
+		val = true
+	}
+	return reflect.ValueOf(val), nil
+}
+
+func callbackArgFloat64(v *C.sqlite3_value) (reflect.Value, error) {
+	if C.sqlite3_value_type(v) != C.SQLITE_FLOAT {
+		return reflect.Value{}, fmt.Errorf("argument must be a FLOAT")
+	}
+	return reflect.ValueOf(float64(C.sqlite3_value_double(v))), nil
+}
+
+func callbackArgBytes(v *C.sqlite3_value) (reflect.Value, error) {
+	switch C.sqlite3_value_type(v) {
+	case C.SQLITE_BLOB:
+		l := C.sqlite3_value_bytes(v)
+		p := C.sqlite3_value_blob(v)
+		return reflect.ValueOf(C.GoBytes(p, l)), nil
+	case C.SQLITE_TEXT:
+		l := C.sqlite3_value_bytes(v)
+		c := unsafe.Pointer(C.sqlite3_value_text(v))
+		return reflect.ValueOf(C.GoBytes(c, l)), nil
+	default:
+		return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
+	}
+}
+
+func callbackArgString(v *C.sqlite3_value) (reflect.Value, error) {
+	switch C.sqlite3_value_type(v) {
+	case C.SQLITE_BLOB:
+		l := C.sqlite3_value_bytes(v)
+		p := (*C.char)(C.sqlite3_value_blob(v))
+		return reflect.ValueOf(C.GoStringN(p, l)), nil
+	case C.SQLITE_TEXT:
+		c := (*C.char)(unsafe.Pointer(C.sqlite3_value_text(v)))
+		return reflect.ValueOf(C.GoString(c)), nil
+	default:
+		return reflect.Value{}, fmt.Errorf("argument must be BLOB or TEXT")
+	}
+}
+
+func callbackArgGeneric(v *C.sqlite3_value) (reflect.Value, error) {
+	switch C.sqlite3_value_type(v) {
+	case C.SQLITE_INTEGER:
+		return callbackArgInt64(v)
+	case C.SQLITE_FLOAT:
+		return callbackArgFloat64(v)
+	case C.SQLITE_TEXT:
+		return callbackArgString(v)
+	case C.SQLITE_BLOB:
+		return callbackArgBytes(v)
+	case C.SQLITE_NULL:
+		// Interpret NULL as a nil byte slice.
+		var ret []byte
+		return reflect.ValueOf(ret), nil
+	default:
+		panic("unreachable")
+	}
+}
+
+func callbackArg(typ reflect.Type) (callbackArgConverter, error) {
+	switch typ.Kind() {
+	case reflect.Interface:
+		if typ.NumMethod() != 0 {
+			return nil, errors.New("the only supported interface type is interface{}")
+		}
+		return callbackArgGeneric, nil
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			return nil, errors.New("the only supported slice type is []byte")
+		}
+		return callbackArgBytes, nil
+	case reflect.String:
+		return callbackArgString, nil
+	case reflect.Bool:
+		return callbackArgBool, nil
+	case reflect.Int64:
+		return callbackArgInt64, nil
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
+		c := callbackArgCast{callbackArgInt64, typ}
+		return c.Run, nil
+	case reflect.Float64:
+		return callbackArgFloat64, nil
+	case reflect.Float32:
+		c := callbackArgCast{callbackArgFloat64, typ}
+		return c.Run, nil
+	default:
+		return nil, fmt.Errorf("don't know how to convert to %s", typ)
+	}
+}
+
+func callbackConvertArgs(argv []*C.sqlite3_value, converters []callbackArgConverter, variadic callbackArgConverter) ([]reflect.Value, error) {
+	var args []reflect.Value
+
+	if len(argv) < len(converters) {
+		return nil, fmt.Errorf("function requires at least %d arguments", len(converters))
+	}
+
+	for i, arg := range argv[:len(converters)] {
+		v, err := converters[i](arg)
+		if err != nil {
+			return nil, err
+		}
+		args = append(args, v)
+	}
+
+	if variadic != nil {
+		for _, arg := range argv[len(converters):] {
+			v, err := variadic(arg)
+			if err != nil {
+				return nil, err
+			}
+			args = append(args, v)
+		}
+	}
+	return args, nil
+}
+
+type callbackRetConverter func(*C.sqlite3_context, reflect.Value) error
+
+func callbackRetInteger(ctx *C.sqlite3_context, v reflect.Value) error {
+	switch v.Type().Kind() {
+	case reflect.Int64:
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
+		v = v.Convert(reflect.TypeOf(int64(0)))
+	case reflect.Bool:
+		b := v.Interface().(bool)
+		if b {
+			v = reflect.ValueOf(int64(1))
+		} else {
+			v = reflect.ValueOf(int64(0))
+		}
+	default:
+		return fmt.Errorf("cannot convert %s to INTEGER", v.Type())
+	}
+
+	C.sqlite3_result_int64(ctx, C.sqlite3_int64(v.Interface().(int64)))
+	return nil
+}
+
+func callbackRetFloat(ctx *C.sqlite3_context, v reflect.Value) error {
+	switch v.Type().Kind() {
+	case reflect.Float64:
+	case reflect.Float32:
+		v = v.Convert(reflect.TypeOf(float64(0)))
+	default:
+		return fmt.Errorf("cannot convert %s to FLOAT", v.Type())
+	}
+
+	C.sqlite3_result_double(ctx, C.double(v.Interface().(float64)))
+	return nil
+}
+
+func callbackRetBlob(ctx *C.sqlite3_context, v reflect.Value) error {
+	if v.Type().Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
+		return fmt.Errorf("cannot convert %s to BLOB", v.Type())
+	}
+	i := v.Interface()
+	if i == nil || len(i.([]byte)) == 0 {
+		C.sqlite3_result_null(ctx)
+	} else {
+		bs := i.([]byte)
+		C._sqlite3_result_blob(ctx, unsafe.Pointer(&bs[0]), C.int(len(bs)))
+	}
+	return nil
+}
+
+func callbackRetText(ctx *C.sqlite3_context, v reflect.Value) error {
+	if v.Type().Kind() != reflect.String {
+		return fmt.Errorf("cannot convert %s to TEXT", v.Type())
+	}
+	C._sqlite3_result_text(ctx, C.CString(v.Interface().(string)))
+	return nil
+}
+
+func callbackRet(typ reflect.Type) (callbackRetConverter, error) {
+	switch typ.Kind() {
+	case reflect.Slice:
+		if typ.Elem().Kind() != reflect.Uint8 {
+			return nil, errors.New("the only supported slice type is []byte")
+		}
+		return callbackRetBlob, nil
+	case reflect.String:
+		return callbackRetText, nil
+	case reflect.Bool, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Int, reflect.Uint:
+		return callbackRetInteger, nil
+	case reflect.Float32, reflect.Float64:
+		return callbackRetFloat, nil
+	default:
+		return nil, fmt.Errorf("don't know how to convert to %s", typ)
+	}
+}
+
+func callbackError(ctx *C.sqlite3_context, err error) {
+	cstr := C.CString(err.Error())
+	defer C.free(unsafe.Pointer(cstr))
+	C.sqlite3_result_error(ctx, cstr, -1)
+}
+
+// Test support code. Tests are not allowed to import "C", so we can't
+// declare any functions that use C.sqlite3_value.
+func callbackSyntheticForTests(v reflect.Value, err error) callbackArgConverter {
+	return func(*C.sqlite3_value) (reflect.Value, error) {
+		return v, err
+	}
+}
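
callbackArgGeneric together with the variadic path in callbackConvertArgs means a registered Go function may take ...interface{} and receive whatever column types the query supplies. A rough user-level sketch (sum_any and sumAny are made-up names; RegisterFunc itself arrives in the sqlite3.go hunk further down):

    // Sketch only; assumes "fmt" is imported.
    // sumAny adds up whatever mix of INTEGER and FLOAT values the query passes in.
    func sumAny(args ...interface{}) (float64, error) {
        var total float64
        for _, a := range args {
            switch v := a.(type) {
            case int64: // INTEGER columns arrive as int64 (callbackArgGeneric)
                total += float64(v)
            case float64: // FLOAT columns arrive as float64
                total += v
            default:
                return 0, fmt.Errorf("sum_any: unsupported argument %T", v)
            }
        }
        return total, nil
    }

    // Registered from a ConnectHook (see RegisterFunc in the sqlite3.go hunk below):
    //     conn.RegisterFunc("sum_any", sumAny, true)
    // Then: SELECT sum_any(1, 2.5, 3)  ==>  6.5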

+ 29 - 12
Godeps/_workspace/src/github.com/mattn/go-sqlite3/doc.go

@@ -1,7 +1,7 @@
 /*
 Package sqlite3 provides interface to SQLite3 databases.
 
-This works as driver for database/sql.
+This works as a driver for database/sql.
 
 Installation
 
@@ -9,7 +9,7 @@ Installation
 
 Supported Types
 
-Currently, go-sqlite3 support following data types.
+Currently, go-sqlite3 supports the following data types.
 
     +------------------------------+
     |go        | sqlite3           |
@@ -26,14 +26,14 @@ Currently, go-sqlite3 support following data types.
 
 SQLite3 Extension
 
-You can write your own extension module for sqlite3. For example, below is a
-extension for Regexp matcher operation.
+You can write your own extension module for sqlite3. For example, below is an
+extension for a Regexp matcher operation.
 
     #include <pcre.h>
     #include <string.h>
     #include <stdio.h>
     #include <sqlite3ext.h>
-    
+
     SQLITE_EXTENSION_INIT1
     static void regexp_func(sqlite3_context *context, int argc, sqlite3_value **argv) {
       if (argc >= 2) {
@@ -44,7 +44,7 @@ extension for Regexp matcher operation.
         int vec[500];
         int n, rc;
         pcre* re = pcre_compile(pattern, 0, &errstr, &erroff, NULL);
-        rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500); 
+        rc = pcre_exec(re, NULL, target, strlen(target), 0, 0, vec, 500);
         if (rc <= 0) {
           sqlite3_result_error(context, errstr, 0);
           return;
@@ -52,7 +52,7 @@ extension for Regexp matcher operation.
         sqlite3_result_int(context, 1);
       }
     }
-    
+
     #ifdef _WIN32
     __declspec(dllexport)
     #endif
@@ -63,8 +63,8 @@ extension for Regexp matcher operation.
           (void*)db, regexp_func, NULL, NULL);
     }
 
-It need to build as so/dll shared library. And you need to register
-extension module like below.
+It needs to be built as a so/dll shared library. And you need to register
+the extension module like below.
 
 	sql.Register("sqlite3_with_extensions",
 		&sqlite3.SQLiteDriver{
@@ -79,9 +79,9 @@ Then, you can use this extension.
 
 Connection Hook
 
-You can hook and inject your codes when connection established. database/sql
-doesn't provide the way to get native go-sqlite3 interfaces. So if you want,
-you need to hook ConnectHook and get the SQLiteConn.
+You can hook and inject your code when the connection is established. database/sql
+doesn't provide a way to get native go-sqlite3 interfaces. So if you want,
+you need to set ConnectHook and get the SQLiteConn.
 
 	sql.Register("sqlite3_with_hook_example",
 			&sqlite3.SQLiteDriver{
@@ -91,5 +91,22 @@ you need to hook ConnectHook and get the SQLiteConn.
 					},
 			})
 
+Go SQLite3 Extensions
+
+If you want to register Go functions as SQLite extension functions,
+call RegisterFunc from ConnectHook.
+
+	regex := func(re, s string) (bool, error) {
+		return regexp.MatchString(re, s)
+	}
+	sql.Register("sqlite3_with_go_func",
+			&sqlite3.SQLiteDriver{
+					ConnectHook: func(conn *sqlite3.SQLiteConn) error {
+						return conn.RegisterFunc("regexp", regex, true)
+					},
+			})
+
+See the documentation of RegisterFunc for more details.
+
 */
 package sqlite3

+ 0 - 242
Godeps/_workspace/src/github.com/mattn/go-sqlite3/error_test.go

@@ -1,242 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import (
-	"database/sql"
-	"io/ioutil"
-	"os"
-	"path"
-	"testing"
-)
-
-func TestSimpleError(t *testing.T) {
-	e := ErrError.Error()
-	if e != "SQL logic error or missing database" {
-		t.Error("wrong error code:" + e)
-	}
-}
-
-func TestCorruptDbErrors(t *testing.T) {
-	dirName, err := ioutil.TempDir("", "sqlite3")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dirName)
-
-	dbFileName := path.Join(dirName, "test.db")
-	f, err := os.Create(dbFileName)
-	if err != nil {
-		t.Error(err)
-	}
-	f.Write([]byte{1, 2, 3, 4, 5})
-	f.Close()
-
-	db, err := sql.Open("sqlite3", dbFileName)
-	if err == nil {
-		_, err = db.Exec("drop table foo")
-	}
-
-	sqliteErr := err.(Error)
-	if sqliteErr.Code != ErrNotADB {
-		t.Error("wrong error code for corrupted DB")
-	}
-	if err.Error() == "" {
-		t.Error("wrong error string for corrupted DB")
-	}
-	db.Close()
-}
-
-func TestSqlLogicErrors(t *testing.T) {
-	dirName, err := ioutil.TempDir("", "sqlite3")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dirName)
-
-	dbFileName := path.Join(dirName, "test.db")
-	db, err := sql.Open("sqlite3", dbFileName)
-	if err != nil {
-		t.Error(err)
-	}
-	defer db.Close()
-
-	_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
-	if err != nil {
-		t.Error(err)
-	}
-
-	const expectedErr = "table Foo already exists"
-	_, err = db.Exec("CREATE TABLE Foo (id INTEGER PRIMARY KEY)")
-	if err.Error() != expectedErr {
-		t.Errorf("Unexpected error: %s, expected %s", err.Error(), expectedErr)
-	}
-
-}
-
-func TestExtendedErrorCodes_ForeignKey(t *testing.T) {
-	dirName, err := ioutil.TempDir("", "sqlite3-err")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dirName)
-
-	dbFileName := path.Join(dirName, "test.db")
-	db, err := sql.Open("sqlite3", dbFileName)
-	if err != nil {
-		t.Error(err)
-	}
-	defer db.Close()
-
-	_, err = db.Exec("PRAGMA foreign_keys=ON;")
-	if err != nil {
-		t.Errorf("PRAGMA foreign_keys=ON: %v", err)
-	}
-
-	_, err = db.Exec(`CREATE TABLE Foo (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		value INTEGER NOT NULL,
-		ref INTEGER NULL REFERENCES Foo (id),
-		UNIQUE(value)
-	);`)
-	if err != nil {
-		t.Error(err)
-	}
-
-	_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (100, 100);")
-	if err == nil {
-		t.Error("No error!")
-	} else {
-		sqliteErr := err.(Error)
-		if sqliteErr.Code != ErrConstraint {
-			t.Errorf("Wrong basic error code: %d != %d",
-				sqliteErr.Code, ErrConstraint)
-		}
-		if sqliteErr.ExtendedCode != ErrConstraintForeignKey {
-			t.Errorf("Wrong extended error code: %d != %d",
-				sqliteErr.ExtendedCode, ErrConstraintForeignKey)
-		}
-	}
-
-}
-
-func TestExtendedErrorCodes_NotNull(t *testing.T) {
-	dirName, err := ioutil.TempDir("", "sqlite3-err")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dirName)
-
-	dbFileName := path.Join(dirName, "test.db")
-	db, err := sql.Open("sqlite3", dbFileName)
-	if err != nil {
-		t.Error(err)
-	}
-	defer db.Close()
-
-	_, err = db.Exec("PRAGMA foreign_keys=ON;")
-	if err != nil {
-		t.Errorf("PRAGMA foreign_keys=ON: %v", err)
-	}
-
-	_, err = db.Exec(`CREATE TABLE Foo (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		value INTEGER NOT NULL,
-		ref INTEGER NULL REFERENCES Foo (id),
-		UNIQUE(value)
-	);`)
-	if err != nil {
-		t.Error(err)
-	}
-
-	res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
-	if err != nil {
-		t.Fatalf("Creating first row: %v", err)
-	}
-
-	id, err := res.LastInsertId()
-	if err != nil {
-		t.Fatalf("Retrieving last insert id: %v", err)
-	}
-
-	_, err = db.Exec("INSERT INTO Foo (ref) VALUES (?);", id)
-	if err == nil {
-		t.Error("No error!")
-	} else {
-		sqliteErr := err.(Error)
-		if sqliteErr.Code != ErrConstraint {
-			t.Errorf("Wrong basic error code: %d != %d",
-				sqliteErr.Code, ErrConstraint)
-		}
-		if sqliteErr.ExtendedCode != ErrConstraintNotNull {
-			t.Errorf("Wrong extended error code: %d != %d",
-				sqliteErr.ExtendedCode, ErrConstraintNotNull)
-		}
-	}
-
-}
-
-func TestExtendedErrorCodes_Unique(t *testing.T) {
-	dirName, err := ioutil.TempDir("", "sqlite3-err")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dirName)
-
-	dbFileName := path.Join(dirName, "test.db")
-	db, err := sql.Open("sqlite3", dbFileName)
-	if err != nil {
-		t.Error(err)
-	}
-	defer db.Close()
-
-	_, err = db.Exec("PRAGMA foreign_keys=ON;")
-	if err != nil {
-		t.Errorf("PRAGMA foreign_keys=ON: %v", err)
-	}
-
-	_, err = db.Exec(`CREATE TABLE Foo (
-		id INTEGER PRIMARY KEY AUTOINCREMENT,
-		value INTEGER NOT NULL,
-		ref INTEGER NULL REFERENCES Foo (id),
-		UNIQUE(value)
-	);`)
-	if err != nil {
-		t.Error(err)
-	}
-
-	res, err := db.Exec("INSERT INTO Foo (value) VALUES (100);")
-	if err != nil {
-		t.Fatalf("Creating first row: %v", err)
-	}
-
-	id, err := res.LastInsertId()
-	if err != nil {
-		t.Fatalf("Retrieving last insert id: %v", err)
-	}
-
-	_, err = db.Exec("INSERT INTO Foo (ref, value) VALUES (?, 100);", id)
-	if err == nil {
-		t.Error("No error!")
-	} else {
-		sqliteErr := err.(Error)
-		if sqliteErr.Code != ErrConstraint {
-			t.Errorf("Wrong basic error code: %d != %d",
-				sqliteErr.Code, ErrConstraint)
-		}
-		if sqliteErr.ExtendedCode != ErrConstraintUnique {
-			t.Errorf("Wrong extended error code: %d != %d",
-				sqliteErr.ExtendedCode, ErrConstraintUnique)
-		}
-		extended := sqliteErr.Code.Extend(3).Error()
-		expected := "constraint failed"
-		if extended != expected {
-			t.Errorf("Wrong basic error code: %q != %q",
-				extended, expected)
-		}
-	}
-
-}

The file diff has been suppressed because it is too large
+ 389 - 197
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3-binding.c


The file diff has been suppressed because it is too large
+ 319 - 228
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3-binding.h


+ 409 - 56
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3.go

@@ -8,8 +8,12 @@ package sqlite3
 /*
 #cgo CFLAGS: -std=gnu99
 #cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
-#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS
+#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
+#ifndef USE_LIBSQLITE3
 #include <sqlite3-binding.h>
+#else
+#include <sqlite3.h>
+#endif
 #include <stdlib.h>
 #include <string.h>
 
@@ -25,6 +29,10 @@ package sqlite3
 # define SQLITE_OPEN_FULLMUTEX 0
 #endif
 
+#ifndef SQLITE_DETERMINISTIC
+# define SQLITE_DETERMINISTIC 0
+#endif
+
 static int
 _sqlite3_open_v2(const char *filename, sqlite3 **ppDb, int flags, const char *zVfs) {
 #ifdef SQLITE_OPEN_URI
@@ -48,24 +56,49 @@ _sqlite3_bind_blob(sqlite3_stmt *stmt, int n, void *p, int np) {
 #include <stdint.h>
 
 static int
-_sqlite3_exec(sqlite3* db, const char* pcmd, long* rowid, long* changes)
+_sqlite3_exec(sqlite3* db, const char* pcmd, long long* rowid, long long* changes)
 {
   int rv = sqlite3_exec(db, pcmd, 0, 0, 0);
-  *rowid = (long) sqlite3_last_insert_rowid(db);
-  *changes = (long) sqlite3_changes(db);
+  *rowid = (long long) sqlite3_last_insert_rowid(db);
+  *changes = (long long) sqlite3_changes(db);
   return rv;
 }
 
 static int
-_sqlite3_step(sqlite3_stmt* stmt, long* rowid, long* changes)
+_sqlite3_step(sqlite3_stmt* stmt, long long* rowid, long long* changes)
 {
   int rv = sqlite3_step(stmt);
   sqlite3* db = sqlite3_db_handle(stmt);
-  *rowid = (long) sqlite3_last_insert_rowid(db);
-  *changes = (long) sqlite3_changes(db);
+  *rowid = (long long) sqlite3_last_insert_rowid(db);
+  *changes = (long long) sqlite3_changes(db);
   return rv;
 }
 
+void _sqlite3_result_text(sqlite3_context* ctx, const char* s) {
+  sqlite3_result_text(ctx, s, -1, &free);
+}
+
+void _sqlite3_result_blob(sqlite3_context* ctx, const void* b, int l) {
+  sqlite3_result_blob(ctx, b, l, SQLITE_TRANSIENT);
+}
+
+
+int _sqlite3_create_function(
+  sqlite3 *db,
+  const char *zFunctionName,
+  int nArg,
+  int eTextRep,
+  uintptr_t pApp,
+  void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
+  void (*xStep)(sqlite3_context*,int,sqlite3_value**),
+  void (*xFinal)(sqlite3_context*)
+) {
+  return sqlite3_create_function(db, zFunctionName, nArg, eTextRep, (void*) pApp, xFunc, xStep, xFinal);
+}
+
+void callbackTrampoline(sqlite3_context*, int, sqlite3_value**);
+void stepTrampoline(sqlite3_context*, int, sqlite3_value**);
+void doneTrampoline(sqlite3_context*);
 */
 import "C"
 import (
@@ -75,6 +108,7 @@ import (
 	"fmt"
 	"io"
 	"net/url"
+	"reflect"
 	"runtime"
 	"strconv"
 	"strings"
@@ -87,6 +121,10 @@ import (
 // into the database. When parsing a string from a timestamp or
 // datetime column, the formats are tried in order.
 var SQLiteTimestampFormats = []string{
+	// By default, store timestamps with whatever timezone they come with.
+	// When parsed, they will be returned with the same timezone.
+	"2006-01-02 15:04:05.999999999-07:00",
+	"2006-01-02T15:04:05.999999999-07:00",
 	"2006-01-02 15:04:05.999999999",
 	"2006-01-02T15:04:05.999999999",
 	"2006-01-02 15:04:05",
@@ -94,14 +132,13 @@ var SQLiteTimestampFormats = []string{
 	"2006-01-02 15:04",
 	"2006-01-02T15:04",
 	"2006-01-02",
-	"2006-01-02 15:04:05-07:00",
 }
 
 func init() {
 	sql.Register("sqlite3", &SQLiteDriver{})
 }
 
-// Return SQLite library Version information.
+// Version returns SQLite library version information.
 func Version() (libVersion string, libVersionNumber int, sourceId string) {
 	libVersion = C.GoString(C.sqlite3_libversion())
 	libVersionNumber = int(C.sqlite3_libversion_number())
@@ -117,8 +154,11 @@ type SQLiteDriver struct {
 
 // Conn struct.
 type SQLiteConn struct {
-	db  *C.sqlite3
-	loc *time.Location
+	db          *C.sqlite3
+	loc         *time.Location
+	txlock      string
+	funcs       []*functionInfo
+	aggregators []*aggInfo
 }
 
 // Tx struct.
@@ -152,6 +192,107 @@ type SQLiteRows struct {
 	cls      bool
 }
 
+type functionInfo struct {
+	f                 reflect.Value
+	argConverters     []callbackArgConverter
+	variadicConverter callbackArgConverter
+	retConverter      callbackRetConverter
+}
+
+func (fi *functionInfo) Call(ctx *C.sqlite3_context, argv []*C.sqlite3_value) {
+	args, err := callbackConvertArgs(argv, fi.argConverters, fi.variadicConverter)
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+
+	ret := fi.f.Call(args)
+
+	if len(ret) == 2 && ret[1].Interface() != nil {
+		callbackError(ctx, ret[1].Interface().(error))
+		return
+	}
+
+	err = fi.retConverter(ctx, ret[0])
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+}
+
+type aggInfo struct {
+	constructor reflect.Value
+
+	// Active aggregator objects for aggregations in flight. The
+	// aggregators are indexed by a counter stored in the aggregation
+	// user data space provided by sqlite.
+	active map[int64]reflect.Value
+	next   int64
+
+	stepArgConverters     []callbackArgConverter
+	stepVariadicConverter callbackArgConverter
+
+	doneRetConverter callbackRetConverter
+}
+
+func (ai *aggInfo) agg(ctx *C.sqlite3_context) (int64, reflect.Value, error) {
+	aggIdx := (*int64)(C.sqlite3_aggregate_context(ctx, C.int(8)))
+	if *aggIdx == 0 {
+		*aggIdx = ai.next
+		ret := ai.constructor.Call(nil)
+		if len(ret) == 2 && ret[1].Interface() != nil {
+			return 0, reflect.Value{}, ret[1].Interface().(error)
+		}
+		if ret[0].IsNil() {
+			return 0, reflect.Value{}, errors.New("aggregator constructor returned nil state")
+		}
+		ai.next++
+		ai.active[*aggIdx] = ret[0]
+	}
+	return *aggIdx, ai.active[*aggIdx], nil
+}
+
+func (ai *aggInfo) Step(ctx *C.sqlite3_context, argv []*C.sqlite3_value) {
+	_, agg, err := ai.agg(ctx)
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+
+	args, err := callbackConvertArgs(argv, ai.stepArgConverters, ai.stepVariadicConverter)
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+
+	ret := agg.MethodByName("Step").Call(args)
+	if len(ret) == 1 && ret[0].Interface() != nil {
+		callbackError(ctx, ret[0].Interface().(error))
+		return
+	}
+}
+
+func (ai *aggInfo) Done(ctx *C.sqlite3_context) {
+	idx, agg, err := ai.agg(ctx)
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+	defer func() { delete(ai.active, idx) }()
+
+	ret := agg.MethodByName("Done").Call(nil)
+	if len(ret) == 2 && ret[1].Interface() != nil {
+		callbackError(ctx, ret[1].Interface().(error))
+		return
+	}
+
+	err = ai.doneRetConverter(ctx, ret[0])
+	if err != nil {
+		callbackError(ctx, err)
+		return
+	}
+}
+
 // Commit transaction.
 func (tx *SQLiteTx) Commit() error {
 	_, err := tx.c.exec("COMMIT")
@@ -164,6 +305,208 @@ func (tx *SQLiteTx) Rollback() error {
 	return err
 }
 
+// RegisterFunc makes a Go function available as a SQLite function.
+//
+// The Go function can have arguments of the following types: any
+// numeric type except complex, bool, []byte, string and
+// interface{}. interface{} arguments are given the direct translation
+// of the SQLite data type: int64 for INTEGER, float64 for FLOAT,
+// []byte for BLOB, string for TEXT.
+//
+// The function can additionally be variadic, as long as the type of
+// the variadic argument is one of the above.
+//
+// If pure is true, SQLite will assume that the function's return
+// value depends only on its inputs, and make more aggressive
+// optimizations in its queries.
+//
+// See _example/go_custom_funcs for a detailed example.
+func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) error {
+	var fi functionInfo
+	fi.f = reflect.ValueOf(impl)
+	t := fi.f.Type()
+	if t.Kind() != reflect.Func {
+		return errors.New("Non-function passed to RegisterFunc")
+	}
+	if t.NumOut() != 1 && t.NumOut() != 2 {
+		return errors.New("SQLite functions must return 1 or 2 values")
+	}
+	if t.NumOut() == 2 && !t.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
+		return errors.New("Second return value of SQLite function must be error")
+	}
+
+	numArgs := t.NumIn()
+	if t.IsVariadic() {
+		numArgs--
+	}
+
+	for i := 0; i < numArgs; i++ {
+		conv, err := callbackArg(t.In(i))
+		if err != nil {
+			return err
+		}
+		fi.argConverters = append(fi.argConverters, conv)
+	}
+
+	if t.IsVariadic() {
+		conv, err := callbackArg(t.In(numArgs).Elem())
+		if err != nil {
+			return err
+		}
+		fi.variadicConverter = conv
+		// Pass -1 to sqlite so that it allows any number of
+		// arguments. The call helper verifies that the minimum number
+		// of arguments is present for variadic functions.
+		numArgs = -1
+	}
+
+	conv, err := callbackRet(t.Out(0))
+	if err != nil {
+		return err
+	}
+	fi.retConverter = conv
+
+	// fi must outlast the database connection, or we'll have dangling pointers.
+	c.funcs = append(c.funcs, &fi)
+
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+	opts := C.SQLITE_UTF8
+	if pure {
+		opts |= C.SQLITE_DETERMINISTIC
+	}
+	rv := C._sqlite3_create_function(c.db, cname, C.int(numArgs), C.int(opts), C.uintptr_t(newHandle(c, &fi)), (*[0]byte)(unsafe.Pointer(C.callbackTrampoline)), nil, nil)
+	if rv != C.SQLITE_OK {
+		return c.lastError()
+	}
+	return nil
+}
+
+// RegisterAggregator makes a Go type available as a SQLite aggregation function.
+//
+// Because aggregation is incremental, it's implemented in Go with a
+// type that has 2 methods: func Step(values) accumulates one row of
+// data into the accumulator, and func Done() ret finalizes and
+// returns the aggregate value. "values" and "ret" may be any type
+// supported by RegisterFunc.
+//
+// RegisterAggregator takes as implementation a constructor function
+// that constructs an instance of the aggregator type each time an
+// aggregation begins. The constructor must return a pointer to a
+// type, or an interface that implements Step() and Done().
+//
+// The constructor function and the Step/Done methods may optionally
+// return an error in addition to their other return values.
+//
+// See _example/go_custom_funcs for a detailed example.
+func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool) error {
+	var ai aggInfo
+	ai.constructor = reflect.ValueOf(impl)
+	t := ai.constructor.Type()
+	if t.Kind() != reflect.Func {
+		return errors.New("non-function passed to RegisterAggregator")
+	}
+	if t.NumOut() != 1 && t.NumOut() != 2 {
+		return errors.New("SQLite aggregator constructors must return 1 or 2 values")
+	}
+	if t.NumOut() == 2 && !t.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
+		return errors.New("Second return value of SQLite function must be error")
+	}
+	if t.NumIn() != 0 {
+		return errors.New("SQLite aggregator constructors must not have arguments")
+	}
+
+	agg := t.Out(0)
+	switch agg.Kind() {
+	case reflect.Ptr, reflect.Interface:
+	default:
+		return errors.New("SQlite aggregator constructor must return a pointer object")
+	}
+	stepFn, found := agg.MethodByName("Step")
+	if !found {
+		return errors.New("SQlite aggregator doesn't have a Step() function")
+	}
+	step := stepFn.Type
+	if step.NumOut() != 0 && step.NumOut() != 1 {
+		return errors.New("SQlite aggregator Step() function must return 0 or 1 values")
+	}
+	if step.NumOut() == 1 && !step.Out(0).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
+		return errors.New("type of SQlite aggregator Step() return value must be error")
+	}
+
+	stepNArgs := step.NumIn()
+	start := 0
+	if agg.Kind() == reflect.Ptr {
+		// Skip over the method receiver
+		stepNArgs--
+		start++
+	}
+	if step.IsVariadic() {
+		stepNArgs--
+	}
+	for i := start; i < start+stepNArgs; i++ {
+		conv, err := callbackArg(step.In(i))
+		if err != nil {
+			return err
+		}
+		ai.stepArgConverters = append(ai.stepArgConverters, conv)
+	}
+	if step.IsVariadic() {
+		conv, err := callbackArg(t.In(start + stepNArgs).Elem())
+		if err != nil {
+			return err
+		}
+		ai.stepVariadicConverter = conv
+		// Pass -1 to sqlite so that it allows any number of
+		// arguments. The call helper verifies that the minimum number
+		// of arguments is present for variadic functions.
+		stepNArgs = -1
+	}
+
+	doneFn, found := agg.MethodByName("Done")
+	if !found {
+		return errors.New("SQlite aggregator doesn't have a Done() function")
+	}
+	done := doneFn.Type
+	doneNArgs := done.NumIn()
+	if agg.Kind() == reflect.Ptr {
+		// Skip over the method receiver
+		doneNArgs--
+	}
+	if doneNArgs != 0 {
+		return errors.New("SQlite aggregator Done() function must have no arguments")
+	}
+	if done.NumOut() != 1 && done.NumOut() != 2 {
+		return errors.New("SQLite aggregator Done() function must return 1 or 2 values")
+	}
+	if done.NumOut() == 2 && !done.Out(1).Implements(reflect.TypeOf((*error)(nil)).Elem()) {
+		return errors.New("second return value of SQLite aggregator Done() function must be error")
+	}
+
+	conv, err := callbackRet(done.Out(0))
+	if err != nil {
+		return err
+	}
+	ai.doneRetConverter = conv
+	ai.active = make(map[int64]reflect.Value)
+	ai.next = 1
+
+	// ai must outlast the database connection, or we'll have dangling pointers.
+	c.aggregators = append(c.aggregators, &ai)
+
+	cname := C.CString(name)
+	defer C.free(unsafe.Pointer(cname))
+	opts := C.SQLITE_UTF8
+	if pure {
+		opts |= C.SQLITE_DETERMINISTIC
+	}
+	rv := C._sqlite3_create_function(c.db, cname, C.int(stepNArgs), C.int(opts), C.uintptr_t(newHandle(c, &ai)), nil, (*[0]byte)(unsafe.Pointer(C.stepTrampoline)), (*[0]byte)(unsafe.Pointer(C.doneTrampoline)))
+	if rv != C.SQLITE_OK {
+		return c.lastError()
+	}
+	return nil
+}
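A matching sketch for RegisterAggregator (again not part of this diff; the type, the "mysum" name and the ConnectHook wiring are illustrative):

```go
package main

import (
	"database/sql"
	"fmt"

	"github.com/mattn/go-sqlite3"
)

// sumInt is a SUM-like aggregate: Step accumulates rows, Done returns the total.
type sumInt struct{ total int64 }

func (s *sumInt) Step(v int64) { s.total += v }
func (s *sumInt) Done() int64  { return s.total }

func main() {
	sql.Register("sqlite3_with_agg", &sqlite3.SQLiteDriver{
		ConnectHook: func(conn *sqlite3.SQLiteConn) error {
			return conn.RegisterAggregator("mysum", func() *sumInt { return &sumInt{} }, true)
		},
	})

	db, err := sql.Open("sqlite3_with_agg", ":memory:")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	db.SetMaxOpenConns(1) // keep one connection so the :memory: database is shared

	db.Exec("CREATE TABLE t (n INTEGER)")
	db.Exec("INSERT INTO t (n) VALUES (1), (2), (3)")

	var total int64
	db.QueryRow("SELECT mysum(n) FROM t").Scan(&total)
	fmt.Println(total) // 6
}
```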
+
 // AutoCommit return which currently auto commit or not.
 func (c *SQLiteConn) AutoCommit() bool {
 	return int(C.sqlite3_get_autocommit(c.db)) != 0
@@ -242,7 +585,7 @@ func (c *SQLiteConn) exec(cmd string) (driver.Result, error) {
 	pcmd := C.CString(cmd)
 	defer C.free(unsafe.Pointer(pcmd))
 
-	var rowid, changes C.long
+	var rowid, changes C.longlong
 	rv := C._sqlite3_exec(c.db, pcmd, &rowid, &changes)
 	if rv != C.SQLITE_OK {
 		return nil, c.lastError()
@@ -252,7 +595,7 @@ func (c *SQLiteConn) exec(cmd string) (driver.Result, error) {
 
 // Begin transaction.
 func (c *SQLiteConn) Begin() (driver.Tx, error) {
-	if _, err := c.exec("BEGIN"); err != nil {
+	if _, err := c.exec(c.txlock); err != nil {
 		return nil, err
 	}
 	return &SQLiteTx{c}, nil
@@ -263,22 +606,26 @@ func errorString(err Error) string {
 }
 
 // Open database and return a new connection.
-// You can specify DSN string with URI filename.
+// You can specify a DSN string using a URI as the filename.
 //   test.db
 //   file:test.db?cache=shared&mode=memory
 //   :memory:
 //   file::memory:
-// go-sqlite handle especially query parameters.
+// go-sqlite3 adds the following query parameters to those used by SQLite:
 //   _loc=XXX
 //     Specify location of time format. It's possible to specify "auto".
 //   _busy_timeout=XXX
 //     Specify value for sqlite3_busy_timeout.
+//   _txlock=XXX
+//     Specify locking behavior for transactions.  XXX can be "immediate",
+//     "deferred", "exclusive".
 func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
 	if C.sqlite3_threadsafe() == 0 {
 		return nil, errors.New("sqlite library was not compiled for thread-safe operation")
 	}
 
 	var loc *time.Location
+	txlock := "BEGIN"
 	busy_timeout := 5000
 	pos := strings.IndexRune(dsn, '?')
 	if pos >= 1 {
@@ -308,6 +655,20 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
 			busy_timeout = int(iv)
 		}
 
+		// _txlock
+		if val := params.Get("_txlock"); val != "" {
+			switch val {
+			case "immediate":
+				txlock = "BEGIN IMMEDIATE"
+			case "exclusive":
+				txlock = "BEGIN EXCLUSIVE"
+			case "deferred":
+				txlock = "BEGIN"
+			default:
+				return nil, fmt.Errorf("Invalid _txlock: %v", val)
+			}
+		}
+
 		if !strings.HasPrefix(dsn, "file:") {
 			dsn = dsn[:pos]
 		}
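A hedged example of the extended DSN handling documented above; the file name and parameter values are illustrative, and the snippet assumes `database/sql`, `log` and a blank import of the driver:

```go
// Combine the documented query parameters in one DSN (values are examples).
db, err := sql.Open("sqlite3",
	"file:test.db?cache=shared&_busy_timeout=10000&_loc=auto&_txlock=immediate")
if err != nil {
	log.Fatal(err)
}
defer db.Close()
// With _txlock=immediate, conn.Begin() now issues "BEGIN IMMEDIATE".
```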
@@ -333,26 +694,11 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
 		return nil, Error{Code: ErrNo(rv)}
 	}
 
-	conn := &SQLiteConn{db: db, loc: loc}
+	conn := &SQLiteConn{db: db, loc: loc, txlock: txlock}
 
 	if len(d.Extensions) > 0 {
-		rv = C.sqlite3_enable_load_extension(db, 1)
-		if rv != C.SQLITE_OK {
-			return nil, errors.New(C.GoString(C.sqlite3_errmsg(db)))
-		}
-
-		for _, extension := range d.Extensions {
-			cext := C.CString(extension)
-			defer C.free(unsafe.Pointer(cext))
-			rv = C.sqlite3_load_extension(db, cext, nil, nil)
-			if rv != C.SQLITE_OK {
-				return nil, errors.New(C.GoString(C.sqlite3_errmsg(db)))
-			}
-		}
-
-		rv = C.sqlite3_enable_load_extension(db, 0)
-		if rv != C.SQLITE_OK {
-			return nil, errors.New(C.GoString(C.sqlite3_errmsg(db)))
+		if err := conn.loadExtensions(d.Extensions); err != nil {
+			return nil, err
 		}
 	}
 
@@ -367,6 +713,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
 
 // Close the connection.
 func (c *SQLiteConn) Close() error {
+	deleteHandles(c)
 	rv := C.sqlite3_close_v2(c.db)
 	if rv != C.SQLITE_OK {
 		return c.lastError()
@@ -376,7 +723,7 @@ func (c *SQLiteConn) Close() error {
 	return nil
 }
 
-// Prepare query string. Return a new statement.
+// Prepare the query string. Return a new statement.
 func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) {
 	pquery := C.CString(query)
 	defer C.free(unsafe.Pointer(pquery))
@@ -476,13 +823,13 @@ func (s *SQLiteStmt) bind(args []driver.Value) error {
 		case float64:
 			rv = C.sqlite3_bind_double(s.s, n, C.double(v))
 		case []byte:
-			var p *byte
-			if len(v) > 0 {
-				p = &v[0]
+			if len(v) == 0 {
+				rv = C._sqlite3_bind_blob(s.s, n, nil, 0)
+			} else {
+				rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(&v[0]), C.int(len(v)))
 			}
-			rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(p), C.int(len(v)))
 		case time.Time:
-			b := []byte(v.UTC().Format(SQLiteTimestampFormats[0]))
+			b := []byte(v.Format(SQLiteTimestampFormats[0]))
 			rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b)))
 		}
 		if rv != C.SQLITE_OK {
@@ -517,7 +864,7 @@ func (s *SQLiteStmt) Exec(args []driver.Value) (driver.Result, error) {
 		C.sqlite3_clear_bindings(s.s)
 		return nil, err
 	}
-	var rowid, changes C.long
+	var rowid, changes C.longlong
 	rv := C._sqlite3_step(s.s, &rowid, &changes)
 	if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
 		err := s.c.lastError()
@@ -554,6 +901,17 @@ func (rc *SQLiteRows) Columns() []string {
 	return rc.cols
 }
 
+// Return column types.
+func (rc *SQLiteRows) DeclTypes() []string {
+	if rc.decltype == nil {
+		rc.decltype = make([]string, rc.nc)
+		for i := 0; i < rc.nc; i++ {
+			rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))))
+		}
+	}
+	return rc.decltype
+}
+
 // Move cursor to next.
 func (rc *SQLiteRows) Next(dest []driver.Value) error {
 	rv := C.sqlite3_step(rc.s.s)
@@ -568,12 +926,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
 		return nil
 	}
 
-	if rc.decltype == nil {
-		rc.decltype = make([]string, rc.nc)
-		for i := 0; i < rc.nc; i++ {
-			rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))))
-		}
-	}
+	rc.DeclTypes()
 
 	for i := range dest {
 		switch C.sqlite3_column_type(rc.s.s, C.int(i)) {
@@ -581,18 +934,15 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
 			val := int64(C.sqlite3_column_int64(rc.s.s, C.int(i)))
 			switch rc.decltype[i] {
 			case "timestamp", "datetime", "date":
-				unixTimestamp := strconv.FormatInt(val, 10)
 				var t time.Time
-				if len(unixTimestamp) == 13 {
-					duration, err := time.ParseDuration(unixTimestamp + "ms")
-					if err != nil {
-						return fmt.Errorf("error parsing %s value %d, %s", rc.decltype[i], val, err)
-					}
-					epoch := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
-					t = epoch.Add(duration)
+				// Assume a millisecond unix timestamp if it's 13 digits -- too
+				// large to be a reasonable timestamp in seconds.
+				if val > 1e12 || val < -1e12 {
+					val *= int64(time.Millisecond) // convert ms to nsec
 				} else {
-					t = time.Unix(val, 0)
+					val *= int64(time.Second) // convert sec to nsec
 				}
+				t = time.Unix(0, val).UTC()
 				if rc.s.c.loc != nil {
 					t = t.In(rc.s.c.loc)
 				}
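The integer branch above replaces the old 13-digit string check with a magnitude threshold; restated in isolation as a hedged helper (the function name is ours, not the library's):

```go
// Same heuristic as the diff: |val| > 1e12 is treated as Unix milliseconds,
// anything smaller as Unix seconds; both are converted to nanoseconds.
func sqliteIntToTime(val int64) time.Time {
	if val > 1e12 || val < -1e12 {
		val *= int64(time.Millisecond)
	} else {
		val *= int64(time.Second)
	}
	return time.Unix(0, val).UTC()
}
```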
@@ -624,11 +974,14 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
 		case C.SQLITE_TEXT:
 			var err error
 			var timeVal time.Time
-			s := C.GoString((*C.char)(unsafe.Pointer(C.sqlite3_column_text(rc.s.s, C.int(i)))))
+
+			n := int(C.sqlite3_column_bytes(rc.s.s, C.int(i)))
+			s := C.GoStringN((*C.char)(unsafe.Pointer(C.sqlite3_column_text(rc.s.s, C.int(i)))), C.int(n))
 
 			switch rc.decltype[i] {
 			case "timestamp", "datetime", "date":
 				var t time.Time
+				s = strings.TrimSuffix(s, "Z")
 				for _, format := range SQLiteTimestampFormats {
 					if timeVal, err = time.ParseInLocation(format, s, time.UTC); err == nil {
 						t = timeVal

+ 0 - 83
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_fts3_test.go

@@ -1,83 +0,0 @@
-// Copyright (C) 2015 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import (
-	"database/sql"
-	"os"
-	"testing"
-)
-
-func TestFTS3(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("DROP TABLE foo")
-	_, err = db.Exec("CREATE VIRTUAL TABLE foo USING fts3(id INTEGER PRIMARY KEY, value TEXT)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 1, `今日の 晩御飯は 天麩羅よ`)
-	if err != nil {
-		t.Fatal("Failed to insert value:", err)
-	}
-
-	_, err = db.Exec("INSERT INTO foo(id, value) VALUES(?, ?)", 2, `今日は いい 天気だ`)
-	if err != nil {
-		t.Fatal("Failed to insert value:", err)
-	}
-
-	rows, err := db.Query("SELECT id, value FROM foo WHERE value MATCH '今日* 天*'")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-	defer rows.Close()
-
-	for rows.Next() {
-		var id int
-		var value string
-
-		if err := rows.Scan(&id, &value); err != nil {
-			t.Error("Unable to scan results:", err)
-			continue
-		}
-
-		if id == 1 && value != `今日の 晩御飯は 天麩羅よ` {
-			t.Error("Value for id 1 should be `今日の 晩御飯は 天麩羅よ`, but:", value)
-		} else if id == 2 && value != `今日は いい 天気だ` {
-			t.Error("Value for id 2 should be `今日は いい 天気だ`, but:", value)
-		}
-	}
-
-	rows, err = db.Query("SELECT value FROM foo WHERE value MATCH '今日* 天麩羅*'")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-	defer rows.Close()
-
-	var value string
-	if !rows.Next() {
-		t.Fatal("Result should be only one")
-	}
-
-	if err := rows.Scan(&value); err != nil {
-		t.Fatal("Unable to scan results:", err)
-	}
-
-	if value != `今日の 晩御飯は 天麩羅よ` {
-		t.Fatal("Value should be `今日の 晩御飯は 天麩羅よ`, but:", value)
-	}
-
-	if rows.Next() {
-		t.Fatal("Result should be only one")
-	}
-}

+ 13 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_fts5.go

@@ -0,0 +1,13 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build fts5
+
+package sqlite3
+
+/*
+#cgo CFLAGS: -DSQLITE_ENABLE_FTS5
+#cgo LDFLAGS: -lm
+*/
+import "C"

+ 13 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_icu.go

@@ -0,0 +1,13 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build icu 
+
+package sqlite3
+
+/*
+#cgo LDFLAGS: -licuuc -licui18n
+#cgo CFLAGS: -DSQLITE_ENABLE_ICU
+*/
+import "C"

+ 12 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_json1.go

@@ -0,0 +1,12 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build json1
+
+package sqlite3
+
+/*
+#cgo CFLAGS: -DSQLITE_ENABLE_JSON1
+*/
+import "C"

+ 14 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go

@@ -0,0 +1,14 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build libsqlite3
+
+package sqlite3
+
+/*
+#cgo CFLAGS: -DUSE_LIBSQLITE3
+#cgo linux LDFLAGS: -lsqlite3
+#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
+*/
+import "C"

+ 63 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_load_extension.go

@@ -0,0 +1,63 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build !sqlite_omit_load_extension
+
+package sqlite3
+
+/*
+#include <sqlite3-binding.h>
+#include <stdlib.h>
+*/
+import "C"
+import (
+	"errors"
+	"unsafe"
+)
+
+func (c *SQLiteConn) loadExtensions(extensions []string) error {
+	rv := C.sqlite3_enable_load_extension(c.db, 1)
+	if rv != C.SQLITE_OK {
+		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+	}
+
+	for _, extension := range extensions {
+		cext := C.CString(extension)
+		defer C.free(unsafe.Pointer(cext))
+		rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
+		if rv != C.SQLITE_OK {
+			return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+		}
+	}
+
+	rv = C.sqlite3_enable_load_extension(c.db, 0)
+	if rv != C.SQLITE_OK {
+		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+	}
+	return nil
+}
+
+func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
+	rv := C.sqlite3_enable_load_extension(c.db, 1)
+	if rv != C.SQLITE_OK {
+		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+	}
+
+	clib := C.CString(lib)
+	defer C.free(unsafe.Pointer(clib))
+	centry := C.CString(entry)
+	defer C.free(unsafe.Pointer(centry))
+
+	rv = C.sqlite3_load_extension(c.db, clib, centry, nil)
+	if rv != C.SQLITE_OK {
+		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+	}
+
+	rv = C.sqlite3_enable_load_extension(c.db, 0)
+	if rv != C.SQLITE_OK {
+		return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
+	}
+
+	return nil
+}
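A hedged usage sketch for the exported LoadExtension; the shared-library path and entry point are placeholders, and the ConnectHook wiring matches the earlier sketches:

```go
// Load an extension on every new connection (path and entry point are
// placeholders -- consult the extension's own documentation).
sql.Register("sqlite3_with_ext", &sqlite3.SQLiteDriver{
	ConnectHook: func(conn *sqlite3.SQLiteConn) error {
		return conn.LoadExtension("/path/to/extension.so", "sqlite3_extension_init")
	},
})
```

If extensions only need to be loaded at open time, setting the driver's Extensions field is simpler: the unexported loadExtensions above takes care of enabling and disabling extension loading around the loop.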

+ 23 - 0
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_omit_load_extension.go

@@ -0,0 +1,23 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build sqlite_omit_load_extension
+
+package sqlite3
+
+/*
+#cgo CFLAGS: -DSQLITE_OMIT_LOAD_EXTENSION
+*/
+import "C"
+import (
+	"errors"
+)
+
+func (c *SQLiteConn) loadExtensions(extensions []string) error {
+	return errors.New("Extensions have been disabled for static builds")
+}
+
+func (c *SQLiteConn) LoadExtension(lib string, entry string) error {
+	return errors.New("Extensions have been disabled for static builds")
+}

+ 0 - 1
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_other.go

@@ -9,6 +9,5 @@ package sqlite3
 /*
 #cgo CFLAGS: -I.
 #cgo linux LDFLAGS: -ldl
-#cgo LDFLAGS: -lpthread
 */
 import "C"

+ 0 - 947
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_test.go

@@ -1,947 +0,0 @@
-// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
-//
-// Use of this source code is governed by an MIT-style
-// license that can be found in the LICENSE file.
-
-package sqlite3
-
-import (
-	"crypto/rand"
-	"database/sql"
-	"encoding/hex"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/mattn/go-sqlite3/sqlite3_test"
-)
-
-func TempFilename() string {
-	randBytes := make([]byte, 16)
-	rand.Read(randBytes)
-	return filepath.Join(os.TempDir(), "foo"+hex.EncodeToString(randBytes)+".db")
-}
-
-func TestOpen(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("drop table foo")
-	_, err = db.Exec("create table foo (id integer)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	if stat, err := os.Stat(tempFilename); err != nil || stat.IsDir() {
-		t.Error("Failed to create ./foo.db")
-	}
-}
-
-func TestClose(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-
-	_, err = db.Exec("drop table foo")
-	_, err = db.Exec("create table foo (id integer)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	stmt, err := db.Prepare("select id from foo where id = ?")
-	if err != nil {
-		t.Fatal("Failed to select records:", err)
-	}
-
-	db.Close()
-	_, err = stmt.Exec(1)
-	if err == nil {
-		t.Fatal("Failed to operate closed statement")
-	}
-}
-
-func TestInsert(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("drop table foo")
-	_, err = db.Exec("create table foo (id integer)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	res, err := db.Exec("insert into foo(id) values(123)")
-	if err != nil {
-		t.Fatal("Failed to insert record:", err)
-	}
-	affected, _ := res.RowsAffected()
-	if affected != 1 {
-		t.Fatalf("Expected %d for affected rows, but %d:", 1, affected)
-	}
-
-	rows, err := db.Query("select id from foo")
-	if err != nil {
-		t.Fatal("Failed to select records:", err)
-	}
-	defer rows.Close()
-
-	rows.Next()
-
-	var result int
-	rows.Scan(&result)
-	if result != 123 {
-		t.Errorf("Fetched %q; expected %q", 123, result)
-	}
-}
-
-func TestUpdate(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("drop table foo")
-	_, err = db.Exec("create table foo (id integer)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	res, err := db.Exec("insert into foo(id) values(123)")
-	if err != nil {
-		t.Fatal("Failed to insert record:", err)
-	}
-	expected, err := res.LastInsertId()
-	if err != nil {
-		t.Fatal("Failed to get LastInsertId:", err)
-	}
-	affected, _ := res.RowsAffected()
-	if err != nil {
-		t.Fatal("Failed to get RowsAffected:", err)
-	}
-	if affected != 1 {
-		t.Fatalf("Expected %d for affected rows, but %d:", 1, affected)
-	}
-
-	res, err = db.Exec("update foo set id = 234")
-	if err != nil {
-		t.Fatal("Failed to update record:", err)
-	}
-	lastId, err := res.LastInsertId()
-	if err != nil {
-		t.Fatal("Failed to get LastInsertId:", err)
-	}
-	if expected != lastId {
-		t.Errorf("Expected %q for last Id, but %q:", expected, lastId)
-	}
-	affected, _ = res.RowsAffected()
-	if err != nil {
-		t.Fatal("Failed to get RowsAffected:", err)
-	}
-	if affected != 1 {
-		t.Fatalf("Expected %d for affected rows, but %d:", 1, affected)
-	}
-
-	rows, err := db.Query("select id from foo")
-	if err != nil {
-		t.Fatal("Failed to select records:", err)
-	}
-	defer rows.Close()
-
-	rows.Next()
-
-	var result int
-	rows.Scan(&result)
-	if result != 234 {
-		t.Errorf("Fetched %q; expected %q", 234, result)
-	}
-}
-
-func TestDelete(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("drop table foo")
-	_, err = db.Exec("create table foo (id integer)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	res, err := db.Exec("insert into foo(id) values(123)")
-	if err != nil {
-		t.Fatal("Failed to insert record:", err)
-	}
-	expected, err := res.LastInsertId()
-	if err != nil {
-		t.Fatal("Failed to get LastInsertId:", err)
-	}
-	affected, err := res.RowsAffected()
-	if err != nil {
-		t.Fatal("Failed to get RowsAffected:", err)
-	}
-	if affected != 1 {
-		t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected)
-	}
-
-	res, err = db.Exec("delete from foo where id = 123")
-	if err != nil {
-		t.Fatal("Failed to delete record:", err)
-	}
-	lastId, err := res.LastInsertId()
-	if err != nil {
-		t.Fatal("Failed to get LastInsertId:", err)
-	}
-	if expected != lastId {
-		t.Errorf("Expected %q for last Id, but %q:", expected, lastId)
-	}
-	affected, err = res.RowsAffected()
-	if err != nil {
-		t.Fatal("Failed to get RowsAffected:", err)
-	}
-	if affected != 1 {
-		t.Errorf("Expected %d for cout of affected rows, but %q:", 1, affected)
-	}
-
-	rows, err := db.Query("select id from foo")
-	if err != nil {
-		t.Fatal("Failed to select records:", err)
-	}
-	defer rows.Close()
-
-	if rows.Next() {
-		t.Error("Fetched row but expected not rows")
-	}
-}
-
-func TestBooleanRoundtrip(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("DROP TABLE foo")
-	_, err = db.Exec("CREATE TABLE foo(id INTEGER, value BOOL)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	_, err = db.Exec("INSERT INTO foo(id, value) VALUES(1, ?)", true)
-	if err != nil {
-		t.Fatal("Failed to insert true value:", err)
-	}
-
-	_, err = db.Exec("INSERT INTO foo(id, value) VALUES(2, ?)", false)
-	if err != nil {
-		t.Fatal("Failed to insert false value:", err)
-	}
-
-	rows, err := db.Query("SELECT id, value FROM foo")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-	defer rows.Close()
-
-	for rows.Next() {
-		var id int
-		var value bool
-
-		if err := rows.Scan(&id, &value); err != nil {
-			t.Error("Unable to scan results:", err)
-			continue
-		}
-
-		if id == 1 && !value {
-			t.Error("Value for id 1 should be true, not false")
-
-		} else if id == 2 && value {
-			t.Error("Value for id 2 should be false, not true")
-		}
-	}
-}
-
-func TestTimestamp(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("DROP TABLE foo")
-	_, err = db.Exec("CREATE TABLE foo(id INTEGER, ts timeSTAMP, dt DATETIME)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	timestamp1 := time.Date(2012, time.April, 6, 22, 50, 0, 0, time.UTC)
-	timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC)
-	timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
-	tests := []struct {
-		value    interface{}
-		expected time.Time
-	}{
-		{"nonsense", time.Time{}},
-		{"0000-00-00 00:00:00", time.Time{}},
-		{timestamp1, timestamp1},
-		{timestamp1.Unix(), timestamp1},
-		{timestamp1.UnixNano() / int64(time.Millisecond), timestamp1},
-		{timestamp1.In(time.FixedZone("TEST", -7*3600)), timestamp1},
-		{timestamp1.Format("2006-01-02 15:04:05.000"), timestamp1},
-		{timestamp1.Format("2006-01-02T15:04:05.000"), timestamp1},
-		{timestamp1.Format("2006-01-02 15:04:05"), timestamp1},
-		{timestamp1.Format("2006-01-02T15:04:05"), timestamp1},
-		{timestamp2, timestamp2},
-		{"2006-01-02 15:04:05.123456789", timestamp2},
-		{"2006-01-02T15:04:05.123456789", timestamp2},
-		{"2012-11-04", timestamp3},
-		{"2012-11-04 00:00", timestamp3},
-		{"2012-11-04 00:00:00", timestamp3},
-		{"2012-11-04 00:00:00.000", timestamp3},
-		{"2012-11-04T00:00", timestamp3},
-		{"2012-11-04T00:00:00", timestamp3},
-		{"2012-11-04T00:00:00.000", timestamp3},
-	}
-	for i := range tests {
-		_, err = db.Exec("INSERT INTO foo(id, ts, dt) VALUES(?, ?, ?)", i, tests[i].value, tests[i].value)
-		if err != nil {
-			t.Fatal("Failed to insert timestamp:", err)
-		}
-	}
-
-	rows, err := db.Query("SELECT id, ts, dt FROM foo ORDER BY id ASC")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-	defer rows.Close()
-
-	seen := 0
-	for rows.Next() {
-		var id int
-		var ts, dt time.Time
-
-		if err := rows.Scan(&id, &ts, &dt); err != nil {
-			t.Error("Unable to scan results:", err)
-			continue
-		}
-		if id < 0 || id >= len(tests) {
-			t.Error("Bad row id: ", id)
-			continue
-		}
-		seen++
-		if !tests[id].expected.Equal(ts) {
-			t.Errorf("Timestamp value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt)
-		}
-		if !tests[id].expected.Equal(dt) {
-			t.Errorf("Datetime value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt)
-		}
-	}
-
-	if seen != len(tests) {
-		t.Errorf("Expected to see %d rows", len(tests))
-	}
-}
-
-func TestBoolean(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("CREATE TABLE foo(id INTEGER, fbool BOOLEAN)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	bool1 := true
-	_, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(1, ?)", bool1)
-	if err != nil {
-		t.Fatal("Failed to insert boolean:", err)
-	}
-
-	bool2 := false
-	_, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(2, ?)", bool2)
-	if err != nil {
-		t.Fatal("Failed to insert boolean:", err)
-	}
-
-	bool3 := "nonsense"
-	_, err = db.Exec("INSERT INTO foo(id, fbool) VALUES(3, ?)", bool3)
-	if err != nil {
-		t.Fatal("Failed to insert nonsense:", err)
-	}
-
-	rows, err := db.Query("SELECT id, fbool FROM foo where fbool = ?", bool1)
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-	counter := 0
-
-	var id int
-	var fbool bool
-
-	for rows.Next() {
-		if err := rows.Scan(&id, &fbool); err != nil {
-			t.Fatal("Unable to scan results:", err)
-		}
-		counter++
-	}
-
-	if counter != 1 {
-		t.Fatalf("Expected 1 row but %v", counter)
-	}
-
-	if id != 1 && fbool != true {
-		t.Fatalf("Value for id 1 should be %v, not %v", bool1, fbool)
-	}
-
-	rows, err = db.Query("SELECT id, fbool FROM foo where fbool = ?", bool2)
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-
-	counter = 0
-
-	for rows.Next() {
-		if err := rows.Scan(&id, &fbool); err != nil {
-			t.Fatal("Unable to scan results:", err)
-		}
-		counter++
-	}
-
-	if counter != 1 {
-		t.Fatalf("Expected 1 row but %v", counter)
-	}
-
-	if id != 2 && fbool != false {
-		t.Fatalf("Value for id 2 should be %v, not %v", bool2, fbool)
-	}
-
-	// make sure "nonsense" triggered an error
-	rows, err = db.Query("SELECT id, fbool FROM foo where id=?;", 3)
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-
-	rows.Next()
-	err = rows.Scan(&id, &fbool)
-	if err == nil {
-		t.Error("Expected error from \"nonsense\" bool")
-	}
-}
-
-func TestFloat32(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("CREATE TABLE foo(id INTEGER)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	_, err = db.Exec("INSERT INTO foo(id) VALUES(null)")
-	if err != nil {
-		t.Fatal("Failed to insert null:", err)
-	}
-
-	rows, err := db.Query("SELECT id FROM foo")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-
-	if !rows.Next() {
-		t.Fatal("Unable to query results:", err)
-	}
-
-	var id interface{}
-	if err := rows.Scan(&id); err != nil {
-		t.Fatal("Unable to scan results:", err)
-	}
-	if id != nil {
-		t.Error("Expected nil but not")
-	}
-}
-
-func TestNull(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	rows, err := db.Query("SELECT 3.141592")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-
-	if !rows.Next() {
-		t.Fatal("Unable to query results:", err)
-	}
-
-	var v interface{}
-	if err := rows.Scan(&v); err != nil {
-		t.Fatal("Unable to scan results:", err)
-	}
-	f, ok := v.(float64)
-	if !ok {
-		t.Error("Expected float but not")
-	}
-	if f != 3.141592 {
-		t.Error("Expected 3.141592 but not")
-	}
-}
-
-func TestTransaction(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec("CREATE TABLE foo(id INTEGER)")
-	if err != nil {
-		t.Fatal("Failed to create table:", err)
-	}
-
-	tx, err := db.Begin()
-	if err != nil {
-		t.Fatal("Failed to begin transaction:", err)
-	}
-
-	_, err = tx.Exec("INSERT INTO foo(id) VALUES(1)")
-	if err != nil {
-		t.Fatal("Failed to insert null:", err)
-	}
-
-	rows, err := tx.Query("SELECT id from foo")
-	if err != nil {
-		t.Fatal("Unable to query foo table:", err)
-	}
-
-	err = tx.Rollback()
-	if err != nil {
-		t.Fatal("Failed to rollback transaction:", err)
-	}
-
-	if rows.Next() {
-		t.Fatal("Unable to query results:", err)
-	}
-
-	tx, err = db.Begin()
-	if err != nil {
-		t.Fatal("Failed to begin transaction:", err)
-	}
-
-	_, err = tx.Exec("INSERT INTO foo(id) VALUES(1)")
-	if err != nil {
-		t.Fatal("Failed to insert null:", err)
-	}
-
-	err = tx.Commit()
-	if err != nil {
-		t.Fatal("Failed to commit transaction:", err)
-	}
-
-	rows, err = tx.Query("SELECT id from foo")
-	if err == nil {
-		t.Fatal("Expected failure to query")
-	}
-}
-
-func TestWAL(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	defer os.Remove(tempFilename)
-	defer db.Close()
-	if _, err = db.Exec("PRAGMA journal_mode=WAL;"); err != nil {
-		t.Fatal("Failed to Exec PRAGMA journal_mode:", err)
-	}
-	if _, err = db.Exec("PRAGMA locking_mode=EXCLUSIVE;"); err != nil {
-		t.Fatal("Failed to Exec PRAGMA locking_mode:", err)
-	}
-	if _, err = db.Exec("CREATE TABLE test (id SERIAL, user TEXT NOT NULL, name TEXT NOT NULL);"); err != nil {
-		t.Fatal("Failed to Exec CREATE TABLE:", err)
-	}
-	if _, err = db.Exec("INSERT INTO test (user, name) VALUES ('user','name');"); err != nil {
-		t.Fatal("Failed to Exec INSERT:", err)
-	}
-
-	trans, err := db.Begin()
-	if err != nil {
-		t.Fatal("Failed to Begin:", err)
-	}
-	s, err := trans.Prepare("INSERT INTO test (user, name) VALUES (?, ?);")
-	if err != nil {
-		t.Fatal("Failed to Prepare:", err)
-	}
-
-	var count int
-	if err = trans.QueryRow("SELECT count(user) FROM test;").Scan(&count); err != nil {
-		t.Fatal("Failed to QueryRow:", err)
-	}
-	if _, err = s.Exec("bbbb", "aaaa"); err != nil {
-		t.Fatal("Failed to Exec prepared statement:", err)
-	}
-	if err = s.Close(); err != nil {
-		t.Fatal("Failed to Close prepared statement:", err)
-	}
-	if err = trans.Commit(); err != nil {
-		t.Fatal("Failed to Commit:", err)
-	}
-}
-
-func TestTimezoneConversion(t *testing.T) {
-	zones := []string{"UTC", "US/Central", "US/Pacific", "Local"}
-	for _, tz := range zones {
-		tempFilename := TempFilename()
-		db, err := sql.Open("sqlite3", tempFilename+"?_loc="+url.QueryEscape(tz))
-		if err != nil {
-			t.Fatal("Failed to open database:", err)
-		}
-		defer os.Remove(tempFilename)
-		defer db.Close()
-
-		_, err = db.Exec("DROP TABLE foo")
-		_, err = db.Exec("CREATE TABLE foo(id INTEGER, ts TIMESTAMP, dt DATETIME)")
-		if err != nil {
-			t.Fatal("Failed to create table:", err)
-		}
-
-		loc, err := time.LoadLocation(tz)
-		if err != nil {
-			t.Fatal("Failed to load location:", err)
-		}
-
-		timestamp1 := time.Date(2012, time.April, 6, 22, 50, 0, 0, time.UTC)
-		timestamp2 := time.Date(2006, time.January, 2, 15, 4, 5, 123456789, time.UTC)
-		timestamp3 := time.Date(2012, time.November, 4, 0, 0, 0, 0, time.UTC)
-		tests := []struct {
-			value    interface{}
-			expected time.Time
-		}{
-			{"nonsense", time.Time{}.In(loc)},
-			{"0000-00-00 00:00:00", time.Time{}.In(loc)},
-			{timestamp1, timestamp1.In(loc)},
-			{timestamp1.Unix(), timestamp1.In(loc)},
-			{timestamp1.In(time.FixedZone("TEST", -7*3600)), timestamp1.In(loc)},
-			{timestamp1.Format("2006-01-02 15:04:05.000"), timestamp1.In(loc)},
-			{timestamp1.Format("2006-01-02T15:04:05.000"), timestamp1.In(loc)},
-			{timestamp1.Format("2006-01-02 15:04:05"), timestamp1.In(loc)},
-			{timestamp1.Format("2006-01-02T15:04:05"), timestamp1.In(loc)},
-			{timestamp2, timestamp2.In(loc)},
-			{"2006-01-02 15:04:05.123456789", timestamp2.In(loc)},
-			{"2006-01-02T15:04:05.123456789", timestamp2.In(loc)},
-			{"2012-11-04", timestamp3.In(loc)},
-			{"2012-11-04 00:00", timestamp3.In(loc)},
-			{"2012-11-04 00:00:00", timestamp3.In(loc)},
-			{"2012-11-04 00:00:00.000", timestamp3.In(loc)},
-			{"2012-11-04T00:00", timestamp3.In(loc)},
-			{"2012-11-04T00:00:00", timestamp3.In(loc)},
-			{"2012-11-04T00:00:00.000", timestamp3.In(loc)},
-		}
-		for i := range tests {
-			_, err = db.Exec("INSERT INTO foo(id, ts, dt) VALUES(?, ?, ?)", i, tests[i].value, tests[i].value)
-			if err != nil {
-				t.Fatal("Failed to insert timestamp:", err)
-			}
-		}
-
-		rows, err := db.Query("SELECT id, ts, dt FROM foo ORDER BY id ASC")
-		if err != nil {
-			t.Fatal("Unable to query foo table:", err)
-		}
-		defer rows.Close()
-
-		seen := 0
-		for rows.Next() {
-			var id int
-			var ts, dt time.Time
-
-			if err := rows.Scan(&id, &ts, &dt); err != nil {
-				t.Error("Unable to scan results:", err)
-				continue
-			}
-			if id < 0 || id >= len(tests) {
-				t.Error("Bad row id: ", id)
-				continue
-			}
-			seen++
-			if !tests[id].expected.Equal(ts) {
-				t.Errorf("Timestamp value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, ts)
-			}
-			if !tests[id].expected.Equal(dt) {
-				t.Errorf("Datetime value for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected, dt)
-			}
-			if tests[id].expected.Location().String() != ts.Location().String() {
-				t.Errorf("Location for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected.Location().String(), ts.Location().String())
-			}
-			if tests[id].expected.Location().String() != dt.Location().String() {
-				t.Errorf("Location for id %v (%v) should be %v, not %v", id, tests[id].value, tests[id].expected.Location().String(), dt.Location().String())
-			}
-		}
-
-		if seen != len(tests) {
-			t.Errorf("Expected to see %d rows", len(tests))
-		}
-	}
-}
-
-func TestSuite(t *testing.T) {
-	db, err := sql.Open("sqlite3", ":memory:")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer db.Close()
-
-	sqlite3_test.RunTests(t, db, sqlite3_test.SQLITE)
-}
-
-// TODO: Execer & Queryer currently disabled
-// https://github.com/mattn/go-sqlite3/issues/82
-func TestExecer(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec(`
-       create table foo (id integer); -- one comment
-       insert into foo(id) values(?);
-       insert into foo(id) values(?);
-       insert into foo(id) values(?); -- another comment
-       `, 1, 2, 3)
-	if err != nil {
-		t.Error("Failed to call db.Exec:", err)
-	}
-}
-
-func TestQueryer(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec(`
-	create table foo (id integer);
-	`)
-	if err != nil {
-		t.Error("Failed to call db.Query:", err)
-	}
-
-	rows, err := db.Query(`
-	insert into foo(id) values(?);
-	insert into foo(id) values(?);
-	insert into foo(id) values(?);
-	select id from foo order by id;
-	`, 3, 2, 1)
-	if err != nil {
-		t.Error("Failed to call db.Query:", err)
-	}
-	defer rows.Close()
-	n := 1
-	if rows != nil {
-		for rows.Next() {
-			var id int
-			err = rows.Scan(&id)
-			if err != nil {
-				t.Error("Failed to db.Query:", err)
-			}
-			if id != n {
-				t.Error("Failed to db.Query: not matched results")
-			}
-		}
-	}
-}
-
-func TestStress(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	db.Exec("CREATE TABLE foo (id int);")
-	db.Exec("INSERT INTO foo VALUES(1);")
-	db.Exec("INSERT INTO foo VALUES(2);")
-	db.Close()
-
-	for i := 0; i < 10000; i++ {
-		db, err := sql.Open("sqlite3", tempFilename)
-		if err != nil {
-			t.Fatal("Failed to open database:", err)
-		}
-
-		for j := 0; j < 3; j++ {
-			rows, err := db.Query("select * from foo where id=1;")
-			if err != nil {
-				t.Error("Failed to call db.Query:", err)
-			}
-			for rows.Next() {
-				var i int
-				if err := rows.Scan(&i); err != nil {
-					t.Errorf("Scan failed: %v\n", err)
-				}
-			}
-			if err := rows.Err(); err != nil {
-				t.Errorf("Post-scan failed: %v\n", err)
-			}
-			rows.Close()
-		}
-		db.Close()
-	}
-}
-
-func TestDateTimeLocal(t *testing.T) {
-	zone := "Asia/Tokyo"
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename+"?_loc="+zone)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	db.Exec("CREATE TABLE foo (dt datetime);")
-	db.Exec("INSERT INTO foo VALUES('2015-03-05 15:16:17');")
-
-	row := db.QueryRow("select * from foo")
-	var d time.Time
-	err = row.Scan(&d)
-	if err != nil {
-		t.Fatal("Failed to scan datetime:", err)
-	}
-	if d.Hour() == 15 || !strings.Contains(d.String(), "JST") {
-		t.Fatal("Result should have timezone", d)
-	}
-	db.Close()
-
-	db, err = sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	row = db.QueryRow("select * from foo")
-	err = row.Scan(&d)
-	if err != nil {
-		t.Fatal("Failed to scan datetime:", err)
-	}
-	if d.UTC().Hour() != 15 || !strings.Contains(d.String(), "UTC") {
-		t.Fatalf("Result should not have timezone %v %v", zone, d.String())
-	}
-
-	_, err = db.Exec("DELETE FROM foo")
-	if err != nil {
-		t.Fatal("Failed to delete table:", err)
-	}
-	dt, err := time.Parse("2006/1/2 15/4/5 -0700 MST", "2015/3/5 15/16/17 +0900 JST")
-	if err != nil {
-		t.Fatal("Failed to parse datetime:", err)
-	}
-	db.Exec("INSERT INTO foo VALUES(?);", dt)
-
-	db.Close()
-	db, err = sql.Open("sqlite3", tempFilename+"?_loc="+zone)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-
-	row = db.QueryRow("select * from foo")
-	err = row.Scan(&d)
-	if err != nil {
-		t.Fatal("Failed to scan datetime:", err)
-	}
-	if d.Hour() != 15 || !strings.Contains(d.String(), "JST") {
-		t.Fatalf("Result should have timezone %v %v", zone, d.String())
-	}
-}
-
-func TestVersion(t *testing.T) {
-	s, n, id := Version()
-	if s == "" || n == 0 || id == "" {
-		t.Errorf("Version failed %q, %d, %q\n", s, n, id)
-	}
-}
-
-func TestNumberNamedParams(t *testing.T) {
-	tempFilename := TempFilename()
-	db, err := sql.Open("sqlite3", tempFilename)
-	if err != nil {
-		t.Fatal("Failed to open database:", err)
-	}
-	defer os.Remove(tempFilename)
-	defer db.Close()
-
-	_, err = db.Exec(`
-	create table foo (id integer, name text, extra text);
-	`)
-	if err != nil {
-		t.Error("Failed to call db.Query:", err)
-	}
-
-	_, err = db.Exec(`insert into foo(id, name, extra) values($1, $2, $2)`, 1, "foo")
-	if err != nil {
-		t.Error("Failed to call db.Exec:", err)
-	}
-
-	row := db.QueryRow(`select id, extra from foo where id = $1 and extra = $2`, 1, "foo")
-	if row == nil {
-		t.Error("Failed to call db.QueryRow")
-	}
-	var id int
-	var extra string
-	err = row.Scan(&id, &extra)
-	if err != nil {
-		t.Error("Failed to db.Scan:", err)
-	}
-	if id != 1 || extra != "foo" {
-		t.Error("Failed to db.QueryRow: not matched results")
-	}
-}

+ 7 - 10
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_test/sqltest.go

@@ -275,12 +275,11 @@ func TestPreparedStmt(t *testing.T) {
 	}
 
 	const nRuns = 10
-	ch := make(chan bool)
+	var wg sync.WaitGroup
 	for i := 0; i < nRuns; i++ {
+		wg.Add(1)
 		go func() {
-			defer func() {
-				ch <- true
-			}()
+			defer wg.Done()
 			for j := 0; j < 10; j++ {
 				count := 0
 				if err := sel.QueryRow().Scan(&count); err != nil && err != sql.ErrNoRows {
@@ -294,9 +293,7 @@ func TestPreparedStmt(t *testing.T) {
 			}
 		}()
 	}
-	for i := 0; i < nRuns; i++ {
-		<-ch
-	}
+	wg.Wait()
 }
 
 // Benchmarks need to use panic() since b.Error errors are lost when
@@ -318,7 +315,7 @@ func BenchmarkQuery(b *testing.B) {
 		var i int
 		var f float64
 		var s string
-//		var t time.Time
+		//		var t time.Time
 		if err := db.QueryRow("select null, 1, 1.1, 'foo'").Scan(&n, &i, &f, &s); err != nil {
 			panic(err)
 		}
@@ -331,7 +328,7 @@ func BenchmarkParams(b *testing.B) {
 		var i int
 		var f float64
 		var s string
-//		var t time.Time
+		//		var t time.Time
 		if err := db.QueryRow("select ?, ?, ?, ?", nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
 			panic(err)
 		}
@@ -350,7 +347,7 @@ func BenchmarkStmt(b *testing.B) {
 		var i int
 		var f float64
 		var s string
-//		var t time.Time
+		//		var t time.Time
 		if err := st.QueryRow(nil, 1, 1.1, "foo").Scan(&n, &i, &f, &s); err != nil {
 			panic(err)
 		}

+ 1 - 1
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3_windows.go

@@ -8,7 +8,7 @@ package sqlite3
 
 /*
 #cgo CFLAGS: -I. -fno-stack-check -fno-stack-protector -mno-stack-arg-probe
-#cgo windows,386 CFLAGS: -D_localtime32=localtime
+#cgo windows,386 CFLAGS: -D_USE_32BIT_TIME_T
 #cgo LDFLAGS: -lmingwex -lmingw32
 */
 import "C"

+ 60 - 5
Godeps/_workspace/src/github.com/mattn/go-sqlite3/sqlite3ext.h

@@ -28,7 +28,7 @@ typedef struct sqlite3_api_routines sqlite3_api_routines;
 ** WARNING:  In order to maintain backwards compatibility, add new
 ** interfaces to the end of this structure only.  If you insert new
 ** interfaces in the middle of this structure, then older different
-** versions of SQLite will not be able to load each others' shared
+** versions of SQLite will not be able to load each other's shared
 ** libraries!
 */
 struct sqlite3_api_routines {
@@ -250,11 +250,40 @@ struct sqlite3_api_routines {
   const char *(*uri_parameter)(const char*,const char*);
   char *(*vsnprintf)(int,char*,const char*,va_list);
   int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
+  /* Version 3.8.7 and later */
+  int (*auto_extension)(void(*)(void));
+  int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
+                     void(*)(void*));
+  int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
+                      void(*)(void*),unsigned char);
+  int (*cancel_auto_extension)(void(*)(void));
+  int (*load_extension)(sqlite3*,const char*,const char*,char**);
+  void *(*malloc64)(sqlite3_uint64);
+  sqlite3_uint64 (*msize)(void*);
+  void *(*realloc64)(void*,sqlite3_uint64);
+  void (*reset_auto_extension)(void);
+  void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
+                        void(*)(void*));
+  void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
+                         void(*)(void*), unsigned char);
+  int (*strglob)(const char*,const char*);
+  /* Version 3.8.11 and later */
+  sqlite3_value *(*value_dup)(const sqlite3_value*);
+  void (*value_free)(sqlite3_value*);
+  int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
+  int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
+  /* Version 3.9.0 and later */
+  unsigned int (*value_subtype)(sqlite3_value*);
+  void (*result_subtype)(sqlite3_context*,unsigned int);
+  /* Version 3.10.0 and later */
+  int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
+  int (*strlike)(const char*,const char*,unsigned int);
+  int (*db_cacheflush)(sqlite3*);
 };
 
 /*
 ** The following macros redefine the API routines so that they are
-** redirected throught the global sqlite3_api structure.
+** redirected through the global sqlite3_api structure.
 **
 ** This header file is also used by the loadext.c source file
 ** (part of the main SQLite library - not an extension) so that
@@ -263,7 +292,7 @@ struct sqlite3_api_routines {
 ** the API.  So the redefinition macros are only valid if the
 ** SQLITE_CORE macros is undefined.
 */
-#ifndef SQLITE_CORE
+#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
 #define sqlite3_aggregate_context      sqlite3_api->aggregate_context
 #ifndef SQLITE_OMIT_DEPRECATED
 #define sqlite3_aggregate_count        sqlite3_api->aggregate_count
@@ -390,6 +419,7 @@ struct sqlite3_api_routines {
 #define sqlite3_value_text16le         sqlite3_api->value_text16le
 #define sqlite3_value_type             sqlite3_api->value_type
 #define sqlite3_vmprintf               sqlite3_api->vmprintf
+#define sqlite3_vsnprintf              sqlite3_api->vsnprintf
 #define sqlite3_overload_function      sqlite3_api->overload_function
 #define sqlite3_prepare_v2             sqlite3_api->prepare_v2
 #define sqlite3_prepare16_v2           sqlite3_api->prepare16_v2
@@ -467,9 +497,34 @@ struct sqlite3_api_routines {
 #define sqlite3_uri_parameter          sqlite3_api->uri_parameter
 #define sqlite3_uri_vsnprintf          sqlite3_api->vsnprintf
 #define sqlite3_wal_checkpoint_v2      sqlite3_api->wal_checkpoint_v2
-#endif /* SQLITE_CORE */
+/* Version 3.8.7 and later */
+#define sqlite3_auto_extension         sqlite3_api->auto_extension
+#define sqlite3_bind_blob64            sqlite3_api->bind_blob64
+#define sqlite3_bind_text64            sqlite3_api->bind_text64
+#define sqlite3_cancel_auto_extension  sqlite3_api->cancel_auto_extension
+#define sqlite3_load_extension         sqlite3_api->load_extension
+#define sqlite3_malloc64               sqlite3_api->malloc64
+#define sqlite3_msize                  sqlite3_api->msize
+#define sqlite3_realloc64              sqlite3_api->realloc64
+#define sqlite3_reset_auto_extension   sqlite3_api->reset_auto_extension
+#define sqlite3_result_blob64          sqlite3_api->result_blob64
+#define sqlite3_result_text64          sqlite3_api->result_text64
+#define sqlite3_strglob                sqlite3_api->strglob
+/* Version 3.8.11 and later */
+#define sqlite3_value_dup              sqlite3_api->value_dup
+#define sqlite3_value_free             sqlite3_api->value_free
+#define sqlite3_result_zeroblob64      sqlite3_api->result_zeroblob64
+#define sqlite3_bind_zeroblob64        sqlite3_api->bind_zeroblob64
+/* Version 3.9.0 and later */
+#define sqlite3_value_subtype          sqlite3_api->value_subtype
+#define sqlite3_result_subtype         sqlite3_api->result_subtype
+/* Version 3.10.0 and later */
+#define sqlite3_status64               sqlite3_api->status64
+#define sqlite3_strlike                sqlite3_api->strlike
+#define sqlite3_db_cacheflush          sqlite3_api->db_cacheflush
+#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
 
-#ifndef SQLITE_CORE
+#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
   /* This case when the file really is being compiled as a loadable 
   ** extension */
 # define SQLITE_EXTENSION_INIT1     const sqlite3_api_routines *sqlite3_api=0;

+ 22 - 3
README.md

@@ -16,6 +16,7 @@ Graphite, Elasticsearch, OpenTSDB, Prometheus and InfluxDB.
 - [What's New in Grafana 2.0](http://docs.grafana.org/guides/whats-new-in-v2/)
 - [What's New in Grafana 2.1](http://docs.grafana.org/guides/whats-new-in-v2-1/)
 - [What's New in Grafana 2.5](http://docs.grafana.org/guides/whats-new-in-v2-5/)
+- [What's New in Grafana 3.0](http://docs.grafana.org/guides/whats-new-in-v3/)
 
 ## Features
 ### Graphite Target Editor
@@ -78,7 +79,7 @@ the latest master builds [here](http://grafana.org/download/builds)
 ### Dependencies
 
 - Go 1.5
-- NodeJS
+- NodeJS v4+
 - [Godep](https://github.com/tools/godep)
 
 ### Get Code
@@ -87,8 +88,19 @@ the latest master builds [here](http://grafana.org/download/builds)
 go get github.com/grafana/grafana
 ```
 
+Since dependency imports use the absolute path github.com/grafana/grafana within the $GOPATH,
+your checkout must live at $GOPATH/src/github.com/grafana/grafana for you to be able to develop
+and build grafana from a cloned repository. To achieve that, either clone your forked repository
+directly to $GOPATH/src/github.com/grafana, or create a symbolic link from your copy of the code
+to $GOPATH/src/github.com/grafana/grafana. The latter option makes it easy to switch which
+grafana repository you build.
+```bash
+go get github.com/*your_account*/grafana
+mkdir $GOPATH/src/github.com/grafana
+ln -s  github.com/*your_account*/grafana $GOPATH/src/github.com/grafana/grafana
+```
+
 ### Building the backend
-Replace X.Y.Z by actual version number.
 ```bash
 cd $GOPATH/src/github.com/grafana/grafana
 go run build.go setup            (only needed once to install godep)
@@ -98,7 +110,7 @@ go run build.go build
 
 ### Building frontend assets
 
-To build less to css for the frontend you will need a recent version of of node (v0.12.0),
+To build Less to CSS for the frontend you will need a recent version of **node (v4+)**,
 npm (v2.5.0) and grunt (v0.4.5). Run the following:
 
 ```bash
@@ -106,6 +118,13 @@ npm install
 npm run build
 ```
 
+To build the frontend assets only on changes:
+
+```bash
+sudo npm install -g grunt-cli # only needed once, to install the grunt command-line interface
+grunt watch
+```
+
 ### Recompile backend on source change
 To rebuild on source change (requires that you executed godep restore)
 ```bash

+ 3 - 5
build.go

@@ -132,12 +132,10 @@ func readVersionFromPackageJson() {
 	if len(parts) > 1 {
 		linuxPackageVersion = parts[0]
 		linuxPackageIteration = parts[1]
-		if linuxPackageIteration != "" {
-			// add timestamp to iteration
-			linuxPackageIteration = fmt.Sprintf("%s%v", linuxPackageIteration, time.Now().Unix())
-		}
-		log.Println(fmt.Sprintf("teration %v", linuxPackageIteration))
 	}
+
+	// add timestamp to iteration
+	linuxPackageIteration = fmt.Sprintf("%d%s", time.Now().Unix(), linuxPackageIteration)
 }
 
 type linuxPackageOptions struct {

+ 5 - 2
conf/defaults.ini

@@ -172,6 +172,9 @@ verify_email_enabled = false
 # Background text for the user field on the login page
 login_hint = email or username
 
+# Default UI theme ("dark" or "light")
+default_theme = dark
+
 #################################### Anonymous Auth ##########################
 [auth.anonymous]
 # enable anonymous access
@@ -241,14 +244,14 @@ templates_pattern = emails/*.html
 
 #################################### Logging ##########################
 [log]
-# Either "console", "file", default is "console"
+# Either "console", "file", "syslog". Default is console and  file
 # Use comma to separate multiple modes, e.g. "console, file"
 mode = console, file
 
 # Buffer length of channel, keep it as it is if you don't know what it is.
 buffer_len = 10000
 
-# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
+# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 level = Info
 
 # For "console" mode only

+ 5 - 2
conf/sample.ini

@@ -155,6 +155,9 @@ check_for_updates = true
 # Background text for the user field on the login page
 ;login_hint = email or username
 
+# Default UI theme ("dark" or "light")
+;default_theme = dark
+
 #################################### Anonymous Auth ##########################
 [auth.anonymous]
 # enable anonymous access
@@ -223,14 +226,14 @@ check_for_updates = true
 
 #################################### Logging ##########################
 [log]
-# Either "console", "file", default is "console"
+# Either "console", "file", "syslog". Default is console and  file
 # Use comma to separate multiple modes, e.g. "console, file"
 ;mode = console, file
 
 # Buffer length of channel, keep it as it is if you don't know what it is.
 ;buffer_len = 10000
 
-# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
+# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
 ;level = Info
 
 # For "console" mode only

+ 7 - 0
docker/blocks/graphite/fig

@@ -8,3 +8,10 @@ graphite:
     - /etc/localtime:/etc/localtime:ro
     - /etc/timezone:/etc/timezone:ro
 
+fake-graphite-data:
+  image: grafana/fake-data-gen
+  net: bridge
+  environment:
+    FD_DATASOURCE: graphite
+    FD_PORT: 2003
+

+ 8 - 0
docker/blocks/influxdb/fig

@@ -4,3 +4,11 @@ influxdb:
     - "2004:2004"
     - "8083:8083"
     - "8086:8086"
+
+fake-influxdb-data:
+  image: grafana/fake-data-gen
+  net: bridge
+  environment:
+    FD_DATASOURCE: influxdb
+    FD_PORT: 8086
+

+ 7 - 1
docker/blocks/opentsdb/fig

@@ -2,4 +2,10 @@ opentsdb:
   image: opower/opentsdb:latest
   ports:
     - "4242:4242"
-    
+
+fake-opentsdb-data:
+  image: grafana/fake-data-gen
+  net: bridge
+  environment:
+    FD_DATASOURCE: opentsdb
+

+ 10 - 0
docker/blocks/prometheus/fig

@@ -1,6 +1,16 @@
 prometheus:
   build: blocks/prometheus
+  net: bridge
   ports:
     - "9090:9090"
   volumes:
     - /var/docker/prometheus:/prometheus-data
+
+fake-prometheus-data:
+  image: grafana/fake-data-gen
+  net: bridge
+  ports:
+    - "9091:9091"
+  environment:
+    FD_DATASOURCE: prom
+

+ 2 - 0
docs/sources/datasources/cloudwatch.md

@@ -26,6 +26,8 @@ Name | The data source name, important that this is the same as in Grafana v1.x
 Default | Default data source means that it will be pre-selected for new panels.
 Credentials profile name | Specify the name of the profile to use (if you use `~/aws/credentials` file), leave blank for default. This option was introduced in Grafana 2.5.1
 Default Region | Used in query editor to set region (can be changed on per query basis)
+Custom Metrics namespace | Specify the CloudWatch namespace for your custom metrics
+Assume Role Arn | Specify the ARN of the role to assume
 
 ## Authentication
 

+ 1 - 1
docs/sources/datasources/kairosdb.md

@@ -30,7 +30,7 @@ Access | Proxy = access via Grafana backend, Direct = access directory from brow
 ## Query editor
 Open a graph in edit mode by click the title.
 
-![](/img/v2/kairos_query_editor.png)
+![](/img/v2/kairos_query_editor.jpg)
 
 For details on KairosDB metric queries checkout the official.
 - [Query Metrics - KairosDB 0.9.4 documentation](http://kairosdb.github.io/kairosdocs/restapi/QueryMetrics.html).

+ 3 - 3
docs/sources/datasources/opentsdb.md

@@ -7,10 +7,10 @@ page_keywords: grafana, opentsdb, documentation
 # OpenTSDB Guide
 The newest release of Grafana adds additional functionality when using an OpenTSDB Data source.
 
-![](/img/v2/add_OpenTSDB.jpg)
+![](/img/v2/add_OpenTSDB.png)
 
-1. Open the side menu by clicking the the Grafana icon in the top header. 
-2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.    
+1. Open the side menu by clicking the Grafana icon in the top header.
+2. In the side menu under the `Dashboards` link you should find a link named `Data Sources`.
 
     > NOTE: If this link is missing in the side menu it means that your current user does not have the `Admin` role for the current organization.
 

+ 33 - 6
docs/sources/guides/whats-new-in-v3.md

@@ -39,12 +39,13 @@ entire experience right within Grafana.
 
 <img src="/img/v3/grafana_net_tour.png">
 
-A preview of [Grafana.net](http://grafana.net) is launching along with this release. We
-think it’s the perfect compliment to Grafana.
+[Grafana.net](https://grafana.net) offers a central repository where the community can come together to discover, create and
+share plugins (data sources, panels, apps) and dashboards.
 
-Grafana.net currently offers a central repository where the community
-can come together to discover and share plugins (Data Sources, Panels,
-Apps) and Dashboards for Grafana 3.0 and above.
+We are also working on a hosted Graphite-compatible data source that will be optimized for use with Grafana.
+It’ll be easy to combine your existing data source(s) with this OpenSaaS option. Finally, Grafana.net can
+also be a hub to manage all your Grafana instances. You’ll be able to monitor their health and availability,
+perform dashboard backups, and more.
 
 We are also working on a hosted Graphite-compatible Data Source that
 will be optimized for use with Grafana. It’ll be easy to combine your
@@ -65,7 +66,6 @@ Grafana 3.0 comes with a new command line tool called grafana-cli. You
 can easily install plugins from Grafana.net with it. For
 example:
 
-
 ```
 grafana-cli install grafana-pie-chart-panel
 ```
@@ -188,6 +188,33 @@ you can still install manually from [Grafana.net](http://grafana.net)
 * KairosDB: This data source has also no longer shipped with Grafana,
 you can install it manually from [Grafana.net](http://grafana.net)
 
+## Plugin showcase
+
+Discovering and installing plugins is very quick and easy with Grafana 3.0 and [Grafana.net](https://grafana.net). Here
+are a couple that I encourage you to try!
+
+#### [Clock Panel](https://grafana.net/plugins/grafana-clock-panel)
+Supports both current time and countdown mode.
+<img src="/img/v3/clock_panel.png">
+
+#### [Pie Chart Panel](https://grafana.net/plugins/grafana-piechart-panel)
+A simple pie chart panel is now available as an external plugin.
+<img src="/img/v3/pie_chart_panel.png">
+
+#### [WorldPing App](https://grafana.net/plugins/raintank-worldping-app)
+This is a full-blown Grafana app that adds new panels, data sources and pages to give
+feature-rich global performance monitoring directly from your on-prem Grafana.
+
+<img src="/img/v3/wP-Screenshot-dash-web.png">
+
+#### [Zabbix App](https://grafana.net/plugins/alexanderzobnin-zabbix-app)
+This app contains the already very popular Zabbix data source plugin, 2 dashboards and a triggers panel. It is
+created and maintained by [Alexander Zobnin](https://github.com/alexanderzobnin/grafana-zabbix).
+
+<img src="/img/v3/zabbix_app.png">
+
+Check out the full list of plugins on [Grafana.net](https://grafana.net/plugins).
+
 ## CHANGELOG
 
 For a detailed list and link to github issues for everything included

+ 1 - 0
docs/sources/http_api/overview.md

@@ -18,4 +18,5 @@ dashboards, creating users and updating data sources.
 * [User API](/http_api/user/)
 * [Admin API](/http_api/admin/)
 * [Snapshot API](/http_api/snapshot/)
+* [Preferences API](/http_api/preferences/)
 * [Other API](/http_api/other/)

+ 100 - 0
docs/sources/http_api/preferences.md

@@ -0,0 +1,100 @@
+---
+page_title: Preferences API
+page_description: Grafana Preferences API Reference
+page_keywords: grafana, preferences, http, api, documentation
+---
+
+# User and Org Preferences API
+
+Keys:
+
+- **theme** - One of: ``light``, ``dark``, or an empty string for the default theme
+- **homeDashboardId** - The numerical ``:id`` of a favorited dashboard, default: ``0``
+- **timezone** - One of: ``utc``, ``browser``, or an empty string for the default
+
+Omitting a key will cause the current value to be replaced with the
+system default value.
+
+## Get Current User Prefs
+
+`GET /api/user/preferences`
+
+**Example Request**:
+
+    GET /api/user/preferences HTTP/1.1
+    Accept: application/json
+    Content-Type: application/json
+    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Content-Type: application/json
+
+    {"theme":"","homeDashboardId":0,"timezone":""}
+
+## Update Current User Prefs
+
+`PUT /api/user/preferences`
+
+**Example Request**:
+
+    PUT /api/user/preferences HTTP/1.1
+    Accept: application/json
+    Content-Type: application/json
+    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+    {
+      "theme": "",
+      "homeDashboardId":0,
+      "timezone":"utc"
+    }
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Content-Type: text/plain; charset=utf-8
+
+    {"message":"Preferences updated"}
+
+## Get Current Org Prefs
+
+`GET /api/org/preferences`
+
+**Example Request**:
+
+    GET /api/org/preferences HTTP/1.1
+    Accept: application/json
+    Content-Type: application/json
+    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Content-Type: application/json
+
+    {"theme":"","homeDashboardId":0,"timezone":""}
+
+## Update Current Org Prefs
+
+`PUT /api/org/preferences`
+
+**Example Request**:
+
+    PUT /api/org/preferences HTTP/1.1
+    Accept: application/json
+    Content-Type: application/json
+    Authorization: Bearer eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk
+
+    {
+      "theme": "",
+      "homeDashboardId":0,
+      "timezone":"utc"
+    }
+
+**Example Response**:
+
+    HTTP/1.1 200
+    Content-Type: text/plain; charset=utf-8
+
+    {"message":"Preferences updated"}

+ 1 - 1
docs/sources/index.md

@@ -1,6 +1,6 @@
 ---
 page_title: Grafana Installation
-page_description: Install guide for Grafana.
+page_description: Install guide for Grafana
 page_keywords: grafana, installation, documentation
 ---
 

+ 1 - 1
docs/sources/installation/configuration.md

@@ -186,7 +186,7 @@ Defaults to `admin`.
 
 ### admin_password
 
-The password of the default Grafana admin.  Defaults to `admin`.
+The password of the default Grafana admin. Set once on first run. Defaults to `admin`.
 
 ### login_remember_days
 

+ 3 - 10
docs/sources/installation/debian.md

@@ -10,20 +10,13 @@ page_keywords: grafana, installation, debian, ubuntu, guide
 
 Description | Download
 ------------ | -------------
-Stable .deb for Debian-based Linux | [grafana_2.6.0_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb)
-Beta .deb for Debian-based Linux |   [grafana_3.0.0-beta71462173753_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb)
+Stable .deb for Debian-based Linux | [grafana_3.0.2-1463383025_amd64.deb](https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.2-1463383025_amd64.deb)
 
 ## Install Stable
 
-    $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_2.6.0_amd64.deb
+    $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.2-1463383025_amd64.deb
     $ sudo apt-get install -y adduser libfontconfig
-    $ sudo dpkg -i grafana_2.6.0_amd64.deb
-
-## Install 3.0 Beta
-
-    $ wget https://grafanarel.s3.amazonaws.com/builds/grafana_3.0.0-beta71462173753_amd64.deb
-    $ sudo apt-get install -y adduser libfontconfig
-    $ sudo dpkg -i grafana_3.0.0-beta71462173753_amd64.deb
+    $ sudo dpkg -i grafana_3.0.2-1463383025_amd64.deb
 
 ## APT Repository
 

+ 28 - 3
docs/sources/installation/mac.md

@@ -6,8 +6,33 @@ page_keywords: grafana, installation, mac, osx, guide
 
 # Installing on Mac
 
-There is currently no binary build for Mac, but Grafana will happily build on Mac. Read the [build from
-source](/project/building_from_source) page for instructions on how to
-build it yourself.
+Installation can be done using [Homebrew](http://brew.sh/).
+
+Install latest stable:
+
+```
+brew install grafana/grafana/grafana
+```
+
+To start Grafana, look at the command printed after the Homebrew install completes.
+
+You can also add grafana/grafana as a tap and install from it:
+
+```
+brew tap grafana/grafana
+brew install grafana
+```
+
+Install latest unstable from master:
+
+```
+brew install --HEAD grafana/grafana/grafana
+```
+
+To upgrade, use the reinstall command:
+
+```
+brew reinstall --HEAD grafana/grafana/grafana
+```
 
 

+ 4 - 23
docs/sources/installation/rpm.md

@@ -10,43 +10,24 @@ page_keywords: grafana, installation, centos, fedora, opensuse, redhat, guide
 
 Description | Download
 ------------ | -------------
-Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-2.6.0-1.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm)
-Beta .RPM for CentOS / Fedor / OpenSuse / Redhat Linux | [grafana-3.0.0-beta71462173753.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm)
+Stable .RPM for CentOS / Fedora / OpenSuse / Redhat Linux | [grafana-3.0.2-1463383025.x86_64.rpm](https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.2-1463383025.x86_64.rpm)
 
 ## Install Stable Release from package file
 
 You can install Grafana using Yum directly.
 
-    $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-2.6.0-1.x86_64.rpm
+    $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.2-1463383025.x86_64.rpm
 
 Or install manually using `rpm`.
 
 #### On CentOS / Fedora / Redhat:
 
     $ sudo yum install initscripts fontconfig
-    $ sudo rpm -Uvh grafana-2.6.0-1.x86_64.rpm
+    $ sudo rpm -Uvh grafana-3.0.2-1463383025.x86_64.rpm
 
 #### On OpenSuse:
 
-    $ sudo rpm -i --nodeps grafana-2.6.0-1.x86_64.rpm
-
-## Install Beta Release from package file
-
-You can install Grafana using Yum directly.
-
-    $ sudo yum install https://grafanarel.s3.amazonaws.com/builds/grafana-3.0.0-beta71462173753.x86_64.rpm
-
-Or install manually using `rpm`.
-
-#### On CentOS / Fedora / Redhat:
-
-    $ sudo yum install initscripts fontconfig
-    $ sudo rpm -Uvh grafana-3.0.0-beta71462173753.x86_64.rpm
-
-#### On OpenSuse:
-
-    $ sudo rpm -i --nodeps grafana-3.0.0-beta71462173753.x86_64.rpm
-
+    $ sudo rpm -i --nodeps grafana-3.0.2-1463383025.x86_64.rpm
 
 ## Install via YUM Repository
 

+ 1 - 1
docs/sources/installation/windows.md

@@ -10,7 +10,7 @@ page_keywords: grafana, installation, windows guide
 
 Description | Download
 ------------ | -------------
-Stable Zip package for Windows | [grafana.2.6.0.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-2.5.0.windows-x64.zip)
+Stable Zip package for Windows | [grafana.3.0.2.windows-x64.zip](https://grafanarel.s3.amazonaws.com/winbuilds/dist/grafana-3.0.2.windows-x64.zip)
 
 ## Configure
 

+ 1 - 1
karma.conf.js

@@ -26,7 +26,7 @@ module.exports = function(config) {
     browsers: ['PhantomJS'],
     captureTimeout: 20000,
     singleRun: true,
-    autoWatchBatchDelay: 10000,
+    autoWatchBatchDelay: 1000,
     browserNoActivityTimeout: 60000,
 
   });

+ 2 - 2
latest.json

@@ -1,4 +1,4 @@
 {
-  "stable": "2.6.0",
-	"testing": "3.0.0-beta7"
+  "stable": "3.0.2",
+	"testing": "3.0.2"
 }

+ 2 - 2
package.json

@@ -4,7 +4,7 @@
     "company": "Coding Instinct AB"
   },
   "name": "grafana",
-  "version": "3.0.0-beta7",
+  "version": "3.1.0",
   "repository": {
     "type": "git",
     "url": "http://github.com/grafana/grafana.git"
@@ -13,7 +13,7 @@
     "zone.js": "^0.6.6",
     "autoprefixer": "^6.3.3",
     "es6-promise": "^3.0.2",
-    "es6-shim": "^0.35.0",
+    "es6-shim": "^0.35.1",
     "expect.js": "~0.2.0",
     "glob": "~3.2.7",
     "grunt": "~0.4.0",

+ 5 - 5
packaging/publish/publish.sh

@@ -1,7 +1,7 @@
 #! /usr/bin/env bash
 
-deb_ver=3.0.0-beta51460725904
-rpm_ver=3.0.0-beta51460725904
+deb_ver=3.0.1
+rpm_ver=3.0.1-1
 
 #rpm_ver=3.0.0-1
 
@@ -16,7 +16,7 @@ rpm_ver=3.0.0-beta51460725904
 #wget https://grafanarel.s3.amazonaws.com/builds/grafana-${rpm_ver}.x86_64.rpm
 
 #package_cloud push grafana/testing/el/6 grafana-${rpm_ver}.x86_64.rpm
-package_cloud push grafana/testing/el/7 grafana-${rpm_ver}.x86_64.rpm
+#package_cloud push grafana/testing/el/7 grafana-${rpm_ver}.x86_64.rpm
 
-# package_cloud push grafana/stable/el/7 grafana-${version}-1.x86_64.rpm
-# package_cloud push grafana/stable/el/6 grafana-${version}-1.x86_64.rpm
+package_cloud push grafana/stable/el/7 grafana-${rpm_ver}.x86_64.rpm
+package_cloud push grafana/stable/el/6 grafana-${rpm_ver}.x86_64.rpm

+ 82 - 32
pkg/api/cloudwatch/cloudwatch.go

@@ -4,6 +4,8 @@ import (
 	"encoding/json"
 	"errors"
 	"io/ioutil"
+	"strings"
+	"sync"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
@@ -14,6 +16,8 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/grafana/grafana/pkg/log"
 	"github.com/grafana/grafana/pkg/middleware"
 	m "github.com/grafana/grafana/pkg/models"
 )
@@ -44,31 +48,97 @@ func init() {
 	}
 }
 
-var awsCredentials map[string]*credentials.Credentials = make(map[string]*credentials.Credentials)
+type cache struct {
+	credential *credentials.Credentials
+	expiration *time.Time
+}
 
-func getCredentials(profile string) *credentials.Credentials {
-	if _, ok := awsCredentials[profile]; ok {
-		return awsCredentials[profile]
+var awsCredentialCache map[string]cache = make(map[string]cache)
+var credentialCacheLock sync.RWMutex
+
+func getCredentials(profile string, region string, assumeRoleArn string) *credentials.Credentials {
+	cacheKey := profile + ":" + assumeRoleArn
+	credentialCacheLock.RLock()
+	if _, ok := awsCredentialCache[cacheKey]; ok {
+		if awsCredentialCache[cacheKey].expiration != nil &&
+			(*awsCredentialCache[cacheKey].expiration).After(time.Now().UTC()) {
+			result := awsCredentialCache[cacheKey].credential
+			credentialCacheLock.RUnlock()
+			return result
+		}
+	}
+	credentialCacheLock.RUnlock()
+
+	accessKeyId := ""
+	secretAccessKey := ""
+	sessionToken := ""
+	var expiration *time.Time
+	expiration = nil
+	if strings.Index(assumeRoleArn, "arn:aws:iam:") == 0 {
+		params := &sts.AssumeRoleInput{
+			RoleArn:         aws.String(assumeRoleArn),
+			RoleSessionName: aws.String("GrafanaSession"),
+			DurationSeconds: aws.Int64(900),
+		}
+
+		stsSess := session.New()
+		stsCreds := credentials.NewChainCredentials(
+			[]credentials.Provider{
+				&credentials.EnvProvider{},
+				&credentials.SharedCredentialsProvider{Filename: "", Profile: profile},
+				&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(stsSess), ExpiryWindow: 5 * time.Minute},
+			})
+		stsConfig := &aws.Config{
+			Region:      aws.String(region),
+			Credentials: stsCreds,
+		}
+		svc := sts.New(session.New(stsConfig), stsConfig)
+		resp, err := svc.AssumeRole(params)
+		if err != nil {
+			// ignore
+			log.Error(3, "CloudWatch: Failed to assume role", err)
+		}
+		if resp.Credentials != nil {
+			accessKeyId = *resp.Credentials.AccessKeyId
+			secretAccessKey = *resp.Credentials.SecretAccessKey
+			sessionToken = *resp.Credentials.SessionToken
+			expiration = resp.Credentials.Expiration
+		}
 	}
 
 	sess := session.New()
 	creds := credentials.NewChainCredentials(
 		[]credentials.Provider{
+			&credentials.StaticProvider{Value: credentials.Value{
+				AccessKeyID:     accessKeyId,
+				SecretAccessKey: secretAccessKey,
+				SessionToken:    sessionToken,
+			}},
 			&credentials.EnvProvider{},
 			&credentials.SharedCredentialsProvider{Filename: "", Profile: profile},
 			&ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess), ExpiryWindow: 5 * time.Minute},
 		})
-	awsCredentials[profile] = creds
+	credentialCacheLock.Lock()
+	awsCredentialCache[cacheKey] = cache{
+		credential: creds,
+		expiration: expiration,
+	}
+	credentialCacheLock.Unlock()
 
 	return creds
 }
 
-func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
+func getAwsConfig(req *cwRequest) *aws.Config {
+	assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
 	cfg := &aws.Config{
 		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
+		Credentials: getCredentials(req.DataSource.Database, req.Region, assumeRoleArn),
 	}
+	return cfg
+}
 
+func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
+	cfg := getAwsConfig(req)
 	svc := cloudwatch.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
@@ -104,11 +174,7 @@ func handleGetMetricStatistics(req *cwRequest, c *middleware.Context) {
 }
 
 func handleListMetrics(req *cwRequest, c *middleware.Context) {
-	cfg := &aws.Config{
-		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
-	}
-
+	cfg := getAwsConfig(req)
 	svc := cloudwatch.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
@@ -144,11 +210,7 @@ func handleListMetrics(req *cwRequest, c *middleware.Context) {
 }
 
 func handleDescribeAlarms(req *cwRequest, c *middleware.Context) {
-	cfg := &aws.Config{
-		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
-	}
-
+	cfg := getAwsConfig(req)
 	svc := cloudwatch.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
@@ -187,11 +249,7 @@ func handleDescribeAlarms(req *cwRequest, c *middleware.Context) {
 }
 
 func handleDescribeAlarmsForMetric(req *cwRequest, c *middleware.Context) {
-	cfg := &aws.Config{
-		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
-	}
-
+	cfg := getAwsConfig(req)
 	svc := cloudwatch.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
@@ -227,11 +285,7 @@ func handleDescribeAlarmsForMetric(req *cwRequest, c *middleware.Context) {
 }
 
 func handleDescribeAlarmHistory(req *cwRequest, c *middleware.Context) {
-	cfg := &aws.Config{
-		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
-	}
-
+	cfg := getAwsConfig(req)
 	svc := cloudwatch.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
@@ -263,11 +317,7 @@ func handleDescribeAlarmHistory(req *cwRequest, c *middleware.Context) {
 }
 
 func handleDescribeInstances(req *cwRequest, c *middleware.Context) {
-	cfg := &aws.Config{
-		Region:      aws.String(req.Region),
-		Credentials: getCredentials(req.DataSource.Database),
-	}
-
+	cfg := getAwsConfig(req)
 	svc := ec2.New(session.New(cfg), cfg)
 
 	reqParam := &struct {
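The new `getCredentials` caches temporary credentials per profile-plus-role-ARN key behind a `sync.RWMutex` and reuses an entry until its STS expiration passes. The standalone sketch below illustrates that same read-through cache pattern with made-up names; it is not the Grafana code itself.

```
package main

import (
	"fmt"
	"sync"
	"time"
)

// entry mirrors the cache value in the hunk above: a credential plus an optional expiry.
type entry struct {
	value      string
	expiration *time.Time
}

var (
	cacheLock sync.RWMutex
	cacheMap  = map[string]entry{}
)

// getCached returns a cached value while it is still valid; otherwise it rebuilds
// the value and stores it. Entries without an expiry are always rebuilt, matching
// the behaviour of the code above.
func getCached(key string, build func() (string, *time.Time)) string {
	cacheLock.RLock()
	if e, ok := cacheMap[key]; ok && e.expiration != nil && e.expiration.After(time.Now().UTC()) {
		cacheLock.RUnlock()
		return e.value
	}
	cacheLock.RUnlock()

	value, exp := build()

	cacheLock.Lock()
	cacheMap[key] = entry{value: value, expiration: exp}
	cacheLock.Unlock()
	return value
}

func main() {
	exp := time.Now().UTC().Add(15 * time.Minute)
	creds := getCached("default:arn:aws:iam::123456789012:role/example", func() (string, *time.Time) {
		return "temporary-credentials", &exp
	})
	fmt.Println(creds)
}
```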

+ 11 - 9
pkg/api/cloudwatch/metrics.go

@@ -86,7 +86,7 @@ func init() {
 		"AWS/EC2":              {"AutoScalingGroupName", "ImageId", "InstanceId", "InstanceType"},
 		"AWS/ELB":              {"LoadBalancerName", "AvailabilityZone"},
 		"AWS/ElasticMapReduce": {"ClusterId", "JobFlowId", "JobId"},
-		"AWS/ES":               {},
+		"AWS/ES":               {"ClientId", "DomainName"},
 		"AWS/Events":           {"RuleName"},
 		"AWS/Kinesis":          {"StreamName", "ShardID"},
 		"AWS/Lambda":           {"FunctionName"},
@@ -166,7 +166,8 @@ func handleGetMetrics(req *cwRequest, c *middleware.Context) {
 		}
 	} else {
 		var err error
-		if namespaceMetrics, err = getMetricsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, getAllMetrics); err != nil {
+		assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
+		if namespaceMetrics, err = getMetricsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, assumeRoleArn, getAllMetrics); err != nil {
 			c.JsonApiErr(500, "Unable to call AWS API", err)
 			return
 		}
@@ -199,7 +200,8 @@ func handleGetDimensions(req *cwRequest, c *middleware.Context) {
 		}
 	} else {
 		var err error
-		if dimensionValues, err = getDimensionsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, getAllMetrics); err != nil {
+		assumeRoleArn := req.DataSource.JsonData.Get("assumeRoleArn").MustString()
+		if dimensionValues, err = getDimensionsForCustomMetrics(req.Region, reqParam.Parameters.Namespace, req.DataSource.Database, assumeRoleArn, getAllMetrics); err != nil {
 			c.JsonApiErr(500, "Unable to call AWS API", err)
 			return
 		}
@@ -214,10 +216,10 @@ func handleGetDimensions(req *cwRequest, c *middleware.Context) {
 	c.JSON(200, result)
 }
 
-func getAllMetrics(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
+func getAllMetrics(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
 	cfg := &aws.Config{
 		Region:      aws.String(region),
-		Credentials: getCredentials(database),
+		Credentials: getCredentials(database, region, assumeRoleArn),
 	}
 
 	svc := cloudwatch.New(session.New(cfg), cfg)
@@ -244,8 +246,8 @@ func getAllMetrics(region string, namespace string, database string) (cloudwatch
 
 var metricsCacheLock sync.Mutex
 
-func getMetricsForCustomMetrics(region string, namespace string, database string, getAllMetrics func(string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
-	result, err := getAllMetrics(region, namespace, database)
+func getMetricsForCustomMetrics(region string, namespace string, database string, assumeRoleArn string, getAllMetrics func(string, string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
+	result, err := getAllMetrics(region, namespace, database, assumeRoleArn)
 	if err != nil {
 		return []string{}, err
 	}
@@ -282,8 +284,8 @@ func getMetricsForCustomMetrics(region string, namespace string, database string
 
 var dimensionsCacheLock sync.Mutex
 
-func getDimensionsForCustomMetrics(region string, namespace string, database string, getAllMetrics func(string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
-	result, err := getAllMetrics(region, namespace, database)
+func getDimensionsForCustomMetrics(region string, namespace string, database string, assumeRoleArn string, getAllMetrics func(string, string, string, string) (cloudwatch.ListMetricsOutput, error)) ([]string, error) {
+	result, err := getAllMetrics(region, namespace, database, assumeRoleArn)
 	if err != nil {
 		return []string{}, err
 	}

+ 6 - 4
pkg/api/cloudwatch/metrics_test.go

@@ -14,7 +14,8 @@ func TestCloudWatchMetrics(t *testing.T) {
 		region := "us-east-1"
 		namespace := "Foo"
 		database := "default"
-		f := func(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
+		assumeRoleArn := ""
+		f := func(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
 			return cloudwatch.ListMetricsOutput{
 				Metrics: []*cloudwatch.Metric{
 					{
@@ -28,7 +29,7 @@ func TestCloudWatchMetrics(t *testing.T) {
 				},
 			}, nil
 		}
-		metrics, _ := getMetricsForCustomMetrics(region, namespace, database, f)
+		metrics, _ := getMetricsForCustomMetrics(region, namespace, database, assumeRoleArn, f)
 
 		Convey("Should contain Test_MetricName", func() {
 			So(metrics, ShouldContain, "Test_MetricName")
@@ -39,7 +40,8 @@ func TestCloudWatchMetrics(t *testing.T) {
 		region := "us-east-1"
 		namespace := "Foo"
 		database := "default"
-		f := func(region string, namespace string, database string) (cloudwatch.ListMetricsOutput, error) {
+		assumeRoleArn := ""
+		f := func(region string, namespace string, database string, assumeRoleArn string) (cloudwatch.ListMetricsOutput, error) {
 			return cloudwatch.ListMetricsOutput{
 				Metrics: []*cloudwatch.Metric{
 					{
@@ -53,7 +55,7 @@ func TestCloudWatchMetrics(t *testing.T) {
 				},
 			}, nil
 		}
-		dimensionKeys, _ := getDimensionsForCustomMetrics(region, namespace, database, f)
+		dimensionKeys, _ := getDimensionsForCustomMetrics(region, namespace, database, assumeRoleArn, f)
 
 		Convey("Should contain Test_DimensionName", func() {
 			So(dimensionKeys, ShouldContain, "Test_DimensionName")

+ 1 - 0
pkg/api/dtos/plugins.go

@@ -29,6 +29,7 @@ type PluginListItem struct {
 	Info          *plugins.PluginInfo `json:"info"`
 	LatestVersion string              `json:"latestVersion"`
 	HasUpdate     bool                `json:"hasUpdate"`
+	DefaultNavUrl string              `json:"defaultNavUrl"`
 }
 
 type PluginList []PluginListItem

+ 6 - 3
pkg/api/frontendsettings.go

@@ -137,9 +137,12 @@ func getFrontendSettingsMap(c *middleware.Context) (map[string]interface{}, erro
 		"allowOrgCreate":    (setting.AllowUserOrgCreate && c.IsSignedIn) || c.IsGrafanaAdmin,
 		"authProxyEnabled":  setting.AuthProxyEnabled,
 		"buildInfo": map[string]interface{}{
-			"version":    setting.BuildVersion,
-			"commit":     setting.BuildCommit,
-			"buildstamp": setting.BuildStamp,
+			"version":       setting.BuildVersion,
+			"commit":        setting.BuildCommit,
+			"buildstamp":    setting.BuildStamp,
+			"latestVersion": plugins.GrafanaLatestVersion,
+			"hasUpdate":     plugins.GrafanaHasUpdate,
+			"env":           setting.Env,
 		},
 	}
 

+ 1 - 1
pkg/api/pluginproxy/pluginproxy.go

@@ -88,7 +88,7 @@ func NewApiPluginProxy(ctx *middleware.Context, proxyPath string, route *plugins
 			}
 
 			for key, value := range headers {
-				log.Info("setting key %v value %v", key, value[0])
+				log.Trace("setting key %v value %v", key, value[0])
 				req.Header.Set(key, value[0])
 			}
 		}

+ 6 - 0
pkg/api/plugins.go

@@ -8,6 +8,7 @@ import (
 	"github.com/grafana/grafana/pkg/middleware"
 	m "github.com/grafana/grafana/pkg/models"
 	"github.com/grafana/grafana/pkg/plugins"
+	"github.com/grafana/grafana/pkg/setting"
 )
 
 func GetPluginList(c *middleware.Context) Response {
@@ -46,6 +47,7 @@ func GetPluginList(c *middleware.Context) Response {
 			Info:          &pluginDef.Info,
 			LatestVersion: pluginDef.GrafanaNetVersion,
 			HasUpdate:     pluginDef.GrafanaNetHasUpdate,
+			DefaultNavUrl: pluginDef.DefaultNavUrl,
 		}
 
 		if pluginSetting, exists := pluginSettingsMap[pluginDef.Id]; exists {
@@ -53,6 +55,10 @@ func GetPluginList(c *middleware.Context) Response {
 			listItem.Pinned = pluginSetting.Pinned
 		}
 
+		if listItem.DefaultNavUrl == "" || !listItem.Enabled {
+			listItem.DefaultNavUrl = setting.AppSubUrl + "/plugins/" + listItem.Id + "/edit"
+		}
+
 		// filter out disabled
 		if enabledFilter == "1" && !listItem.Enabled {
 			continue

+ 1 - 1
pkg/api/render.go

@@ -31,7 +31,7 @@ func RenderToPng(c *middleware.Context) {
 		Width:     queryReader.Get("width", "800"),
 		Height:    queryReader.Get("height", "400"),
 		SessionId: c.Session.ID(),
-		Timeout:   queryReader.Get("timeout", "15"),
+		Timeout:   queryReader.Get("timeout", "30"),
 	}
 
 	renderOpts.Url = setting.ToAbsUrl(renderOpts.Url)

+ 7 - 8
pkg/cmd/grafana-cli/commands/upgrade_command.go

@@ -1,6 +1,8 @@
 package commands
 
 import (
+	"github.com/fatih/color"
+	"github.com/grafana/grafana/pkg/cmd/grafana-cli/log"
 	s "github.com/grafana/grafana/pkg/cmd/grafana-cli/services"
 )
 
@@ -14,20 +16,17 @@ func upgradeCommand(c CommandLine) error {
 		return err
 	}
 
-	remotePlugins, err2 := s.ListAllPlugins(c.GlobalString("repo"))
+	v, err2 := s.GetPlugin(localPlugin.Id, c.GlobalString("repo"))
 
 	if err2 != nil {
 		return err2
 	}
 
-	for _, v := range remotePlugins.Plugins {
-		if localPlugin.Id == v.Id {
-			if ShouldUpgrade(localPlugin.Info.Version, v) {
-				s.RemoveInstalledPlugin(pluginsDir, pluginName)
-				return InstallPlugin(localPlugin.Id, "", c)
-			}
-		}
+	if ShouldUpgrade(localPlugin.Info.Version, v) {
+		s.RemoveInstalledPlugin(pluginsDir, pluginName)
+		return InstallPlugin(localPlugin.Id, "", c)
 	}
 
+	log.Infof("%s %s is up to date \n", color.GreenString("✔"), localPlugin.Id)
 	return nil
 }

+ 15 - 7
pkg/cmd/grafana-cli/services/services.go

@@ -44,7 +44,7 @@ func ReadPlugin(pluginDir, pluginName string) (m.InstalledPlugin, error) {
 	}
 
 	if res.Id == "" {
-		return m.InstalledPlugin{}, errors.New("could not read find plugin " + pluginName)
+		return m.InstalledPlugin{}, errors.New("could not find plugin " + pluginName + " in " + pluginDir)
 	}
 
 	return res, nil
@@ -69,13 +69,21 @@ func RemoveInstalledPlugin(pluginPath, id string) error {
 }
 
 func GetPlugin(pluginId, repoUrl string) (m.Plugin, error) {
-	resp, _ := ListAllPlugins(repoUrl)
+	fullUrl := repoUrl + "/repo/" + pluginId
 
-	for _, i := range resp.Plugins {
-		if i.Id == pluginId {
-			return i, nil
-		}
+	res, err := goreq.Request{Uri: fullUrl, MaxRedirects: 3}.Do()
+	if err != nil {
+		return m.Plugin{}, err
+	}
+	if res.StatusCode != 200 {
+		return m.Plugin{}, fmt.Errorf("Could not access %s statuscode %v", fullUrl, res.StatusCode)
 	}
 
-	return m.Plugin{}, errors.New("could not find plugin named \"" + pluginId + "\"")
+	var resp m.Plugin
+	err = res.Body.FromJsonTo(&resp)
+	if err != nil {
+		return m.Plugin{}, errors.New("Could not load plugin data")
+	}
+
+	return resp, nil
 }

+ 2 - 0
pkg/components/renderer/renderer.go

@@ -1,6 +1,7 @@
 package renderer
 
 import (
+	"fmt"
 	"io"
 	"os"
 	"os/exec"
@@ -72,6 +73,7 @@ func RenderToPng(params *RenderOpts) (string, error) {
 		if err := cmd.Process.Kill(); err != nil {
 			log.Error(4, "failed to kill: %v", err)
 		}
+		return "", fmt.Errorf("PhantomRenderer::renderToPng timeout (>%vs)", timeout)
 	case <-done:
 	}
 

+ 0 - 2
pkg/plugins/app_plugin.go

@@ -76,8 +76,6 @@ func (app *AppPlugin) initApp() {
 		}
 	}
 
-	app.DefaultNavUrl = setting.AppSubUrl + "/plugins/" + app.Id + "/edit"
-
 	// slugify pages
 	for _, include := range app.Includes {
 		if include.Slug == "" {

+ 10 - 1
pkg/plugins/queries.go

@@ -24,7 +24,16 @@ func GetPluginSettings(orgId int64) (map[string]*m.PluginSettingInfoDTO, error)
 		}
 
 		// default to enabled true
-		opt := &m.PluginSettingInfoDTO{Enabled: true}
+		opt := &m.PluginSettingInfoDTO{
+			PluginId: pluginDef.Id,
+			OrgId:    orgId,
+			Enabled:  true,
+		}
+
+		// apps are disabled by default
+		if pluginDef.Type == PluginTypeApp {
+			opt.Enabled = false
+		}
 
 		// if it's included in app check app settings
 		if pluginDef.IncludedInAppId != "" {

+ 2 - 2
pkg/plugins/update_checker.go

@@ -91,14 +91,14 @@ func checkForUpdates() {
 
 	resp2, err := client.Get("https://raw.githubusercontent.com/grafana/grafana/master/latest.json")
 	if err != nil {
-		log.Trace("Failed to get lates.json repo from github: %v", err.Error())
+		log.Trace("Failed to get latest.json repo from github: %v", err.Error())
 		return
 	}
 
 	defer resp2.Body.Close()
 	body, err = ioutil.ReadAll(resp2.Body)
 	if err != nil {
-		log.Trace("Update check failed, reading response from github.net, %v", err.Error())
+		log.Trace("Update check failed, reading response from github.com, %v", err.Error())
 		return
 	}
 

+ 3 - 1
pkg/services/sqlstore/preferences.go

@@ -5,6 +5,8 @@ import (
 
 	"github.com/grafana/grafana/pkg/bus"
 	m "github.com/grafana/grafana/pkg/models"
+
+	"github.com/grafana/grafana/pkg/setting"
 )
 
 func init() {
@@ -26,7 +28,7 @@ func GetPreferencesWithDefaults(query *m.GetPreferencesWithDefaultsQuery) error
 	}
 
 	res := &m.Preferences{
-		Theme:           "dark",
+		Theme:           setting.DefaultTheme,
 		Timezone:        "browser",
 		HomeDashboardId: 0,
 	}

+ 18 - 6
pkg/setting/setting.go

@@ -88,6 +88,7 @@ var (
 	AutoAssignOrgRole  string
 	VerifyEmailEnabled bool
 	LoginHint          string
+	DefaultTheme       string
 
 	// Http auth
 	AdminUser     string
@@ -454,6 +455,7 @@ func NewConfigContext(args *CommandLineArgs) error {
 	AutoAssignOrgRole = users.Key("auto_assign_org_role").In("Editor", []string{"Editor", "Admin", "Read Only Editor", "Viewer"})
 	VerifyEmailEnabled = users.Key("verify_email_enabled").MustBool(false)
 	LoginHint = users.Key("login_hint").String()
+	DefaultTheme = users.Key("default_theme").String()
 
 	// anonymous access
 	AnonymousEnabled = Cfg.Section("auth.anonymous").Key("enabled").MustBool(false)
@@ -526,6 +528,17 @@ var logLevels = map[string]int{
 	"Critical": 5,
 }
 
+func getLogLevel(key string, defaultName string) (string, int) {
+	levelName := Cfg.Section(key).Key("level").In(defaultName, []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
+
+	level, ok := logLevels[levelName]
+	if !ok {
+		log.Fatal(4, "Unknown log level: %s", levelName)
+	}
+
+	return levelName, level
+}
+
 func initLogging(args *CommandLineArgs) {
 	//close any existing log handlers.
 	log.Close()
@@ -533,8 +546,12 @@ func initLogging(args *CommandLineArgs) {
 	LogModes = strings.Split(Cfg.Section("log").Key("mode").MustString("console"), ",")
 	LogsPath = makeAbsolute(Cfg.Section("paths").Key("logs").String(), HomePath)
 
+	defaultLevelName, _ := getLogLevel("log", "Info")
+
 	LogConfigs = make([]util.DynMap, len(LogModes))
+
 	for i, mode := range LogModes {
+
 		mode = strings.TrimSpace(mode)
 		sec, err := Cfg.GetSection("log." + mode)
 		if err != nil {
@@ -542,12 +559,7 @@ func initLogging(args *CommandLineArgs) {
 		}
 
 		// Log level.
-		levelName := Cfg.Section("log."+mode).Key("level").In("Trace",
-			[]string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"})
-		level, ok := logLevels[levelName]
-		if !ok {
-			log.Fatal(4, "Unknown log level: %s", levelName)
-		}
+		_, level := getLogLevel("log."+mode, defaultLevelName)
 
 		// Generate log configuration.
 		switch mode {
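In short, the new `getLogLevel` helper reads a default level from the `[log]` section and then lets each `[log.<mode>]` section override it. A hypothetical config fragment (not part of this commit) that exercises the fallback:

```
[log]
mode = console, file
# default level, used by any mode that does not set its own
level = Info

[log.console]
# overrides the default above for the console handler only
level = Debug
```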

+ 4 - 0
public/app/app.ts

@@ -42,6 +42,10 @@ export class GrafanaApp {
     app.constant('grafanaVersion', "@grafanaVersion@");
 
     app.config(($locationProvider, $controllerProvider, $compileProvider, $filterProvider, $provide) => {
+      if (config.buildInfo.env !== 'development') {
+        $compileProvider.debugInfoEnabled(false);
+      }
+
       this.registerFunctions.controller = $controllerProvider.register;
       this.registerFunctions.directive  = $compileProvider.directive;
       this.registerFunctions.factory    = $provide.factory;

+ 3 - 1
public/app/core/directives/metric_segment.js

@@ -209,7 +209,9 @@ function (_, $, coreModule) {
             // needs to call this after digest so
             // property is synced with outerscope
             $scope.$$postDigest(function() {
-              $scope.onChange();
+              $scope.$apply(function() {
+                $scope.onChange();
+              });
             });
           };
 

+ 29 - 22
public/app/core/services/datasource_srv.js

@@ -66,14 +66,17 @@ function (angular, _, coreModule, config) {
     };
 
     this.getAnnotationSources = function() {
-      return _.reduce(config.datasources, function(memo, value) {
+      var sources = [];
 
+      this.addDataSourceVariables(sources);
+
+      _.each(config.datasources, function(value) {
         if (value.meta && value.meta.annotations) {
-          memo.push(value);
+          sources.push(value);
         }
+      });
 
-        return memo;
-      }, []);
+      return sources;
     };
 
     this.getMetricSources = function(options) {
@@ -90,24 +93,7 @@ function (angular, _, coreModule, config) {
       });
 
       if (!options || !options.skipVariables) {
-        // look for data source variables
-        for (var i = 0; i < templateSrv.variables.length; i++) {
-          var variable = templateSrv.variables[i];
-          if (variable.type !== 'datasource') {
-            continue;
-          }
-
-          var first = variable.current.value;
-          var ds = config.datasources[first];
-
-          if (ds) {
-            metricSources.push({
-              name: '$' + variable.name,
-              value: '$' + variable.name,
-              meta: ds.meta,
-            });
-          }
-        }
+        this.addDataSourceVariables(metricSources);
       }
 
       metricSources.sort(function(a, b) {
@@ -123,6 +109,27 @@ function (angular, _, coreModule, config) {
       return metricSources;
     };
 
+    this.addDataSourceVariables = function(list) {
+      // look for data source variables
+      for (var i = 0; i < templateSrv.variables.length; i++) {
+        var variable = templateSrv.variables[i];
+        if (variable.type !== 'datasource') {
+          continue;
+        }
+
+        var first = variable.current.value;
+        var ds = config.datasources[first];
+
+        if (ds) {
+          list.push({
+            name: '$' + variable.name,
+            value: '$' + variable.name,
+            meta: ds.meta,
+          });
+        }
+      }
+    };
+
     this.init();
   });
 });

+ 1 - 1
public/app/core/utils/datemath.ts

@@ -28,7 +28,7 @@ export function parse(text, roundUp?) {
       mathString = text.substring(index + 2);
     }
     // We're going to just require ISO8601 timestamps, k?
-    time = moment(parseString);
+    time = moment(parseString, moment.ISO_8601);
   }
 
   if (!mathString.length) {

+ 2 - 0
public/app/core/utils/kbn.js

@@ -396,6 +396,7 @@ function($, _) {
   kbn.valueFormats.ev           = kbn.formatBuilders.decimalSIPrefix('eV');
   kbn.valueFormats.amp          = kbn.formatBuilders.decimalSIPrefix('A');
   kbn.valueFormats.volt         = kbn.formatBuilders.decimalSIPrefix('V');
+  kbn.valueFormats.dBm          = kbn.formatBuilders.decimalSIPrefix('dBm');
 
   // Temperature
   kbn.valueFormats.celsius   = kbn.formatBuilders.fixedUnit('°C');
@@ -677,6 +678,7 @@ function($, _) {
           {text: 'electron volt (eV)',         value: 'ev'          },
           {text: 'Ampere (A)',                 value: 'amp'         },
           {text: 'Volt (V)',                   value: 'volt'        },
+          {text: 'Decibel-milliwatt (dBm)',    value: 'dBm'         },
         ]
       },
       {

+ 5 - 4
public/app/features/annotations/annotations_srv.js

@@ -55,10 +55,11 @@ define([
         }, this);
       });
 
-      promiseCached = $q.all(promises)
-        .then(function() {
-          return list;
-        });
+      promiseCached = $q.all(promises).then(function() {
+        return list;
+      }).catch(function(err) {
+        $rootScope.appEvent('alert-error', ['Annotations failed', (err.message || err)]);
+      });
 
       return promiseCached;
     };

+ 1 - 1
public/app/features/annotations/editor_ctrl.js

@@ -30,7 +30,7 @@ function (angular, _, $) {
     $scope.datasourceChanged = function() {
       return datasourceSrv.get($scope.currentAnnotation.datasource).then(function(ds) {
         $scope.currentDatasource = ds;
-        $scope.currentAnnotation.datasource = ds.name;
+        $scope.currentAnnotation.datasource = $scope.currentAnnotation.datasource;
       });
     };
 

+ 1 - 1
public/app/features/dashboard/dashboardSrv.js

@@ -65,7 +65,7 @@ function (angular, $, _, moment) {
 
     // cleans meta data and other non peristent state
     p.getSaveModelClone = function() {
-      var copy = angular.copy(this);
+      var copy = $.extend(true, {}, this);
       delete copy.meta;
       return copy;
     };

+ 2 - 1
public/app/features/dashboard/dynamicDashboardSrv.js

@@ -52,6 +52,8 @@ function (angular, _) {
           else if (panel.repeatPanelId && panel.repeatIteration !== this.iteration) {
             row.panels = _.without(row.panels, panel);
             j = j - 1;
+          } else if (row.repeat || row.repeatRowId) {
+            continue;
           } else if (!_.isEmpty(panel.scopedVars) && panel.repeatIteration !== this.iteration) {
             panel.scopedVars = {};
           }
@@ -118,7 +120,6 @@ function (angular, _) {
           panel = copy.panels[i];
           panel.scopedVars = {};
           panel.scopedVars[variable.name] = option;
-          panel.repeatIteration = this.iteration;
         }
       }, this);
     };

+ 1 - 1
public/app/features/dashboard/partials/settings.html

@@ -86,7 +86,7 @@
 					<button class="btn btn-inverse gf-from-btn" ng-click="_.move(dashboard.rows,$index,$index+1)">
 						<i ng-class="{'invisible': $last}" class="fa fa-arrow-down"></i>
 					</button>
-					<button class="btn btn-inverse gf-form-btn" click="dashboard.rows = _.without(dashboard.rows,row)">
+					<button class="btn btn-inverse gf-form-btn" ng-click="dashboard.rows = _.without(dashboard.rows,row)">
 						<i class="fa fa-trash"></i>
 					</button>
 				</div>

+ 15 - 3
public/app/features/dashboard/rowCtrl.js

@@ -142,12 +142,18 @@ function (angular, _, config) {
   });
 
   module.directive('panelWidth', function() {
+    var fullscreen = false;
+
     return function(scope, element) {
       function updateWidth() {
-        element[0].style.width = ((scope.panel.span / 1.2) * 10) + '%';
+        if (!fullscreen) {
+          element[0].style.width = ((scope.panel.span / 1.2) * 10) + '%';
+        }
       }
 
       scope.onAppEvent('panel-fullscreen-enter', function(evt, info) {
+        fullscreen = true;
+
         if (scope.panel.id !== info.panelId) {
           element.hide();
         } else {
@@ -156,14 +162,20 @@ function (angular, _, config) {
       });
 
       scope.onAppEvent('panel-fullscreen-exit', function(evt, info) {
+        fullscreen = false;
+
         if (scope.panel.id !== info.panelId) {
           element.show();
-        } else {
-          updateWidth();
         }
+
+        updateWidth();
       });
 
       scope.$watch('panel.span', updateWidth);
+
+      if (fullscreen) {
+        element.hide();
+      }
     };
   });
 

+ 2 - 2
public/app/features/dashboard/shareModalCtrl.js

@@ -70,12 +70,12 @@ function (angular, _, require, config) {
       $scope.shareUrl = linkSrv.addParamsToUrl(baseUrl, params);
 
       var soloUrl = $scope.shareUrl;
-      soloUrl = soloUrl.replace('/dashboard/', '/dashboard-solo/');
+      soloUrl = soloUrl.replace(config.appSubUrl + '/dashboard/', config.appSubUrl + '/dashboard-solo/');
       soloUrl = soloUrl.replace("&fullscreen", "");
 
       $scope.iframeHtml = '<iframe src="' + soloUrl + '" width="450" height="200" frameborder="0"></iframe>';
 
-      $scope.imageUrl = soloUrl.replace('/dashboard-solo/', '/render/dashboard-solo/');
+      $scope.imageUrl = soloUrl.replace(config.appSubUrl + '/dashboard-solo/', config.appSubUrl + '/render/dashboard-solo/');
       $scope.imageUrl += '&width=1000';
       $scope.imageUrl += '&height=500';
     };

+ 1 - 1
public/app/features/dashboard/timeSrv.js

@@ -50,7 +50,7 @@ define([
 
       if (!isNaN(value)) {
         var epoch = parseInt(value);
-        return moment(epoch);
+        return moment.utc(epoch);
       }
 
       return null;

+ 11 - 9
public/app/features/org/partials/newOrg.html

@@ -8,14 +8,16 @@
 
 	<p class="playlist-description">Each organization contains their own dashboards, data sources and configuration, and cannot be shared between orgs. While users may belong to more than one, mutiple organization are most frequently used in multi-tenant deployments. </p>
 
-	<div class="gf-form-group">
-		<div class="gf-form">
-			<span class="gf-form-label width-10">Org. name</span>
-			<input type="text" ng-model="newOrg.name" required class="gf-form-input max-width-21" placeholder="organization name">
+	<form>
+		<div class="gf-form-group">
+			<div class="gf-form">
+				<span class="gf-form-label width-10">Org. name</span>
+				<input type="text" ng-model="newOrg.name" required class="gf-form-input max-width-21" placeholder="organization name">
+			</div>
+			<br>
+			<div class="gf-form-buttons-row">
+				<button type="submit" class="btn btn-success" ng-click="createOrg()">Create</button>
+			</div>
 		</div>
-		<br>
-		<div class="gf-form-buttons-row">
-			<button class="btn btn-success" ng-click="createOrg()">Create</button>
-		</div>
-	</div>
+	</form>
 </div>

+ 1 - 1
public/app/features/panel/metrics_ds_selector.ts

@@ -10,7 +10,7 @@ var template = `
   <div class="gf-form-inline">
     <div class="gf-form">
       <label class="gf-form-label">
-        <i class="icon-gf icon-gf-datasource"></i>
+        <i class="icon-gf icon-gf-datasources"></i>
       </label>
       <label class="gf-form-label">
         Panel data source

+ 19 - 1
public/app/features/panel/panel_ctrl.ts

@@ -8,6 +8,7 @@ import $ from 'jquery';
 const TITLE_HEIGHT = 25;
 const EMPTY_TITLE_HEIGHT = 9;
 const PANEL_PADDING = 5;
+const PANEL_BORDER = 2;
 
 import {Emitter} from 'app/core/core';
 
@@ -90,6 +91,23 @@ export class PanelCtrl {
     this.addEditorTab('General', 'public/app/partials/panelgeneral.html');
     this.editModeInitiated = true;
     this.events.emit('init-edit-mode', null);
+
+    var routeParams = this.$injector.get('$routeParams');
+    if (routeParams.editorTab) {
+      this.editorTabs.forEach((tab, i) => {
+        if (tab.title === routeParams.editorTab) {
+          this.editorTabIndex = i;
+        }
+      });
+    }
+  }
+
+  changeTab(newIndex) {
+    this.editorTabIndex = newIndex;
+    var route = this.$injector.get('$route');
+
+    route.current.params.editorTab = this.editorTabs[newIndex].title;
+    route.updateParams();
   }
 
   addEditorTab(title, directiveFn, index?) {
@@ -141,7 +159,7 @@ export class PanelCtrl {
       }
     }
 
-    this.height = this.containerHeight - (PANEL_PADDING + (this.panel.title ? TITLE_HEIGHT : EMPTY_TITLE_HEIGHT));
+    this.height = this.containerHeight - (PANEL_BORDER + PANEL_PADDING + (this.panel.title ? TITLE_HEIGHT : EMPTY_TITLE_HEIGHT));
   }
 
   render(payload?) {

+ 1 - 1
public/app/features/panel/panel_directive.ts

@@ -36,7 +36,7 @@ var panelTemplate = `
 
         <ul class="gf-tabs">
           <li class="gf-tabs-item" ng-repeat="tab in ::ctrl.editorTabs">
-            <a class="gf-tabs-link" ng-click="ctrl.editorTabIndex = $index" ng-class="{active: ctrl.editorTabIndex === $index}">
+            <a class="gf-tabs-link" ng-click="ctrl.changeTab($index)" ng-class="{active: ctrl.editorTabIndex === $index}">
               {{::tab.title}}
             </a>
           </li>

+ 16 - 8
public/app/features/templating/templateSrv.js

@@ -42,6 +42,16 @@ function (angular, _) {
       return value.replace(/([\!\*\+\-\=<>\s\&\|\(\)\[\]\{\}\^\~\?\:\\/"])/g, "\\$1");
     }
 
+    this.luceneFormat = function(value) {
+      if (typeof value === 'string') {
+        return luceneEscape(value);
+      }
+      var quotedValues = _.map(value, function(val) {
+        return '\"' + luceneEscape(val) + '\"';
+      });
+      return '(' + quotedValues.join(' OR ') + ')';
+    };
+
     this.formatValue = function(value, format, variable) {
       // for some scopedVars there is no variable
       variable = variable || {};
@@ -60,13 +70,7 @@ function (angular, _) {
           return '(' + escapedValues.join('|') + ')';
         }
         case "lucene": {
-          if (typeof value === 'string') {
-            return luceneEscape(value);
-          }
-          var quotedValues = _.map(value, function(val) {
-            return '\"' + luceneEscape(val) + '\"';
-          });
-          return '(' + quotedValues.join(' OR ') + ')';
+          return this.luceneFormat(value, format, variable);
         }
         case "pipe": {
           if (typeof value === 'string') {
@@ -97,7 +101,11 @@ function (angular, _) {
       if (!str) {
         return false;
       }
-      return str.indexOf('$' + variableName) !== -1 || str.indexOf('[[' + variableName + ']]') !== -1;
+
+      variableName = regexEscape(variableName);
+      var findVarRegex = new RegExp('\\$(' + variableName + ')[\\W|$]|\\[\\[(' + variableName + ')\\]\\]', 'g');
+      var match = findVarRegex.exec(str);
+      return match !== null;
     };
 
     this.highlightVariablesAsHtml = function(str) {

+ 11 - 4
public/app/features/templating/templateValuesSrv.js

@@ -204,7 +204,7 @@ function (angular, _, kbn) {
       }
 
       if (options.length === 0) {
-        options.push({text: 'No datasurces found', value: ''});
+        options.push({text: 'No data sources found', value: ''});
       }
 
       variable.options = options;
@@ -213,8 +213,7 @@ function (angular, _, kbn) {
     this.updateOptions = function(variable) {
       if (variable.type !== 'query') {
         self._updateNonQueryVariable(variable);
-        self.setVariableValue(variable, variable.options[0]);
-        return $q.when([]);
+        return self.validateVariableSelectionState(variable);
       }
 
       return datasourceSrv.get(variable.datasource)
@@ -251,7 +250,7 @@ function (angular, _, kbn) {
       if (_.isArray(variable.current.value)) {
         self.selectOptionsForCurrentValue(variable);
       } else {
-        var currentOption = _.findWhere(variable.options, { text: variable.current.text });
+        var currentOption = _.findWhere(variable.options, {text: variable.current.text});
         if (currentOption) {
           return self.setVariableValue(variable, currentOption, true);
         } else {
@@ -314,6 +313,14 @@ function (angular, _, kbn) {
         var value = item.value || item.text;
         var text = item.text || item.value;
 
+        if (_.isNumber(value)) {
+          value = value.toString();
+        }
+
+        if (_.isNumber(text)) {
+          text = text.toString();
+        }
+
         if (regex) {
           matches = regex.exec(value);
           if (!matches) { continue; }

+ 7 - 0
public/app/plugins/datasource/cloudwatch/partials/config.html

@@ -24,4 +24,11 @@
 			Namespaces of Custom Metrics
 		</info-popover>
 	</div>
+	<div class="gf-form">
+		<label class="gf-form-label width-13">Assume Role ARN</label>
+		<input type="text" class="gf-form-input max-width-18" ng-model='ctrl.current.jsonData.assumeRoleArn' placeholder="arn:aws:iam:*"></input>
+		<info-popover mode="right-absolute">
+			ARN of Assume Role
+		</info-popover>
+	</div>
 </div>

+ 11 - 3
public/app/plugins/datasource/elasticsearch/datasource.js

@@ -78,7 +78,7 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
         range[timeField]["format"] = "epoch_millis";
       }
 
-      var queryInterpolated = templateSrv.replace(queryString);
+      var queryInterpolated = templateSrv.replace(queryString, {}, 'lucene');
       var filter = { "bool": { "must": [{ "range": range }] } };
       var query = { "bool": { "should": [{ "query_string": { "query": queryInterpolated } }] } };
       var data = {
@@ -204,6 +204,14 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
       });
     };
 
+    function escapeForJson(value) {
+      return value.replace(/\"/g, '\\"');
+    }
+
+    function luceneThenJsonFormat(value) {
+      return escapeForJson(templateSrv.luceneFormat(value));
+    }
+
     this.getFields = function(query) {
       return this._get('/_mapping').then(function(res) {
         var fields = {};
@@ -246,7 +254,7 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
       var header = this.getQueryHeader('count', range.from, range.to);
       var esQuery = angular.toJson(this.queryBuilder.getTermsQuery(queryDef));
 
-      esQuery = esQuery.replace("$lucene_query", queryDef.query || '*');
+      esQuery = esQuery.replace("$lucene_query", escapeForJson(queryDef.query || '*'));
       esQuery = esQuery.replace(/\$timeFrom/g, range.from.valueOf());
       esQuery = esQuery.replace(/\$timeTo/g, range.to.valueOf());
       esQuery = header + '\n' + esQuery + '\n';
@@ -260,7 +268,7 @@ function (angular, _, moment, kbn, ElasticQueryBuilder, IndexPattern, ElasticRes
     };
 
     this.metricFindQuery = function(query) {
-      query = templateSrv.replace(query);
+      query = templateSrv.replace(query, {}, luceneThenJsonFormat);
       query = angular.fromJson(query);
       if (!query) {
         return $q.when([]);

+ 3 - 3
public/app/plugins/datasource/elasticsearch/partials/bucket_agg.html

@@ -70,9 +70,9 @@
 	</div>
 
 	<div ng-if="agg.type === 'filters'">
-		<div class="gf-form-inline" ng-repeat="filter in agg.settings.filters" ng-class="{last: $last}">
+		<div class="gf-form-inline offset-width-7" ng-repeat="filter in agg.settings.filters">
 			<div class="gf-form">
-				<label class="gf-form-item width-10">Query {{$index + 1}}</label>
+				<label class="gf-form-label width-10">Query {{$index + 1}}</label>
 				<input type="text" class="gf-form-input max-width-12" ng-model="filter.query" spellcheck='false' placeholder="Lucene query" ng-blur="onChangeInternal()">
 			</div>
 			<div class="gf-form">
@@ -88,7 +88,7 @@
 
 	<div ng-if="agg.type === 'geohash_grid'">
 		<div class="gf-form offset-width-7">
-			<label class="gf-form-label">Precision</label>
+			<label class="gf-form-label width-10">Precision</label>
 			<input type="number" class="gf-form-input max-width-12" ng-model="agg.settings.precision" spellcheck='false' placeholder="3" ng-blur="onChangeInternal()">
 		</div>
 	</div>

+ 7 - 3
public/app/plugins/datasource/influxdb/datasource.ts

@@ -45,7 +45,7 @@ export default class InfluxDatasource {
     var i, y;
 
     var allQueries = _.map(options.targets, (target) => {
-      if (target.hide) { return []; }
+      if (target.hide) { return ""; }
 
       queryTargets.push(target);
 
@@ -54,8 +54,12 @@ export default class InfluxDatasource {
       var query =  queryModel.render(true);
       query = query.replace(/\$interval/g, (target.interval || options.interval));
       return query;
-
-    }).join(";");
+    }).reduce((acc, current) => {
+      if (current !== "") {
+        acc += ";" + current;
+      }
+      return acc;
+    });
 
     // replace grafana variables
     allQueries = allQueries.replace(/\$timeFilter/g, timeFilter);

+ 1 - 1
public/app/plugins/datasource/influxdb/influx_query.ts

@@ -152,7 +152,7 @@ export default class InfluxQuery {
       if (interpolate) {
         value = this.templateSrv.replace(value, this.scopedVars);
       }
-      if (isNaN(+value)) {
+      if (operator !== '>' && operator !== '<') {
         value = "'" + value.replace('\\', '\\\\') + "'";
       }
     } else if (interpolate){

+ 1 - 1
public/app/plugins/datasource/influxdb/partials/query.editor.html

@@ -30,7 +30,7 @@
 		<div class="gf-form-inline" ng-repeat="selectParts in ctrl.queryModel.selectModels">
 			<div class="gf-form">
 				<label class="gf-form-label query-keyword width-7">
-					<span ng-show="$index === 0">SELECT</span>
+					<span ng-show="$index === 0">SELECT</span>&nbsp;
 				</label>
 			</div>
 

+ 1 - 1
public/app/plugins/datasource/influxdb/partials/query.options.html

@@ -45,7 +45,7 @@
 			<ul>
 				<li>$m = replaced with measurement name</li>
 				<li>$measurement = replaced with measurement name</li>
-				<li>$1 - $9 = replaced with part of measurement name (if you seperate your measurement name with dots)</li>
+				<li>$1 - $9 = replaced with part of measurement name (if you separate your measurement name with dots)</li>
 				<li>$col = replaced with column name</li>
 				<li>$tag_hostname = replaced with the value of the hostname tag</li>
 				<li>You can also use [[tag_hostname]] pattern replacement syntax</li>

+ 13 - 0
public/app/plugins/datasource/influxdb/specs/influx_query_specs.ts

@@ -101,6 +101,19 @@ describe('InfluxQuery', function() {
     });
   });
 
+  describe('query with value condition', function() {
+    it('should not quote value', function() {
+      var query = new InfluxQuery({
+        measurement: 'cpu',
+        groupBy: [],
+        tags: [{key: 'value', value: '5', operator: '>'}]
+      }, templateSrv, {});
+
+      var queryText = query.render();
+      expect(queryText).to.be('SELECT mean("value") FROM "cpu" WHERE "value" > 5 AND $timeFilter');
+    });
+  });
+
   describe('series with groupByTag', function() {
     it('should generate correct query', function() {
       var query = new InfluxQuery({

+ 2 - 1
public/app/plugins/datasource/opentsdb/config_ctrl.ts

@@ -16,7 +16,8 @@ export class OpenTsConfigCtrl {
 
   tsdbVersions = [
     {name: '<=2.1', value: 1},
-    {name: '>=2.2', value: 2},
+    {name: '==2.2', value: 2},
+    {name: '==2.3', value: 3},
   ];
 
   tsdbResolutions = [

Some files were not shown because too many files have changed