
remove unused code from vendor

bergquist 8 years ago
parent
commit
7ed643f302
100 changed files with 6 additions and 25682 deletions
  1. +6 -2  Gopkg.toml
  2. +0 -8  vendor/cloud.google.com/go/bigquery/benchmarks/README.md
  3. +0 -85  vendor/cloud.google.com/go/bigquery/benchmarks/bench.go
  4. +0 -10  vendor/cloud.google.com/go/bigquery/benchmarks/queries.json
  5. +0 -156  vendor/cloud.google.com/go/bigquery/bigquery.go
  6. +0 -101  vendor/cloud.google.com/go/bigquery/copy.go
  7. +0 -501  vendor/cloud.google.com/go/bigquery/dataset.go
  8. +0 -689  vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
  9. +0 -49  vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go
  10. +0 -297  vendor/cloud.google.com/go/bigquery/doc.go
  11. +0 -82  vendor/cloud.google.com/go/bigquery/error.go
  12. +0 -398  vendor/cloud.google.com/go/bigquery/external.go
  13. +0 -105  vendor/cloud.google.com/go/bigquery/extract.go
  14. +0 -135  vendor/cloud.google.com/go/bigquery/file.go
  15. +0 -73  vendor/cloud.google.com/go/bigquery/gcs.go
  16. +0 -206  vendor/cloud.google.com/go/bigquery/iterator.go
  17. +0 -669  vendor/cloud.google.com/go/bigquery/job.go
  18. +0 -126  vendor/cloud.google.com/go/bigquery/load.go
  19. +0 -349  vendor/cloud.google.com/go/bigquery/params.go
  20. +0 -284  vendor/cloud.google.com/go/bigquery/query.go
  21. +0 -318  vendor/cloud.google.com/go/bigquery/schema.go
  22. +0 -487  vendor/cloud.google.com/go/bigquery/table.go
  23. +0 -224  vendor/cloud.google.com/go/bigquery/uploader.go
  24. +0 -718  vendor/cloud.google.com/go/bigquery/value.go
  25. +0 -423  vendor/cloud.google.com/go/bigtable/admin.go
  26. +0 -792  vendor/cloud.google.com/go/bigtable/bigtable.go
  27. +0 -1273  vendor/cloud.google.com/go/bigtable/bttest/inmem.go
  28. +0 -842  vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go
  29. +0 -213  vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go
  30. +0 -44  vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go
  31. +0 -204  vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go
  32. +0 -155  vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go
  33. +0 -125  vendor/cloud.google.com/go/bigtable/doc.go
  34. +0 -318  vendor/cloud.google.com/go/bigtable/filter.go
  35. +0 -158  vendor/cloud.google.com/go/bigtable/gc.go
  36. +0 -246  vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go
  37. +0 -106  vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go
  38. +0 -84  vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go
  39. +0 -48  vendor/cloud.google.com/go/bigtable/internal/option/option.go
  40. +0 -144  vendor/cloud.google.com/go/bigtable/internal/stat/stats.go
  41. +0 -250  vendor/cloud.google.com/go/bigtable/reader.go
  42. +0 -1178  vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json
  43. +0 -277  vendor/cloud.google.com/go/civil/civil.go
  44. +0 -450  vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go
  45. +0 -174  vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go
  46. +0 -291  vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go
  47. +0 -460  vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go
  48. +0 -674  vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go
  49. +0 -48  vendor/cloud.google.com/go/container/apiv1/doc.go
  50. +0 -272  vendor/cloud.google.com/go/container/container.go
  51. +0 -118  vendor/cloud.google.com/go/datastore/client.go
  52. +0 -574  vendor/cloud.google.com/go/datastore/datastore.go
  53. +0 -454  vendor/cloud.google.com/go/datastore/doc.go
  54. +0 -47  vendor/cloud.google.com/go/datastore/errors.go
  55. +0 -280  vendor/cloud.google.com/go/datastore/key.go
  56. +0 -491  vendor/cloud.google.com/go/datastore/load.go
  57. +0 -342  vendor/cloud.google.com/go/datastore/prop.go
  58. +0 -773  vendor/cloud.google.com/go/datastore/query.go
  59. +0 -425  vendor/cloud.google.com/go/datastore/save.go
  60. +0 -41  vendor/cloud.google.com/go/datastore/testdata/index.yaml
  61. +0 -36  vendor/cloud.google.com/go/datastore/time.go
  62. +0 -310  vendor/cloud.google.com/go/datastore/transaction.go
  63. +0 -215  vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go
  64. +0 -211  vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go
  65. +0 -50  vendor/cloud.google.com/go/debugger/apiv2/doc.go
  66. +0 -437  vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go
  67. +0 -48  vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go
  68. +0 -50  vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go
  69. +0 -161  vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go
  70. +0 -301  vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go
  71. +0 -130  vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go
  72. +0 -230  vendor/cloud.google.com/go/errorreporting/errors.go
  73. +0 -48  vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go
  74. +0 -544  vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go
  75. +0 -261  vendor/cloud.google.com/go/firestore/client.go
  76. +0 -114  vendor/cloud.google.com/go/firestore/collref.go
  77. +0 -214  vendor/cloud.google.com/go/firestore/doc.go
  78. +0 -558  vendor/cloud.google.com/go/firestore/docref.go
  79. +0 -261  vendor/cloud.google.com/go/firestore/document.go
  80. +0 -210  vendor/cloud.google.com/go/firestore/fieldpath.go
  81. +0 -423  vendor/cloud.google.com/go/firestore/from_value.go
  82. +0 -3  vendor/cloud.google.com/go/firestore/genproto/README.md
  83. +0 -662  vendor/cloud.google.com/go/firestore/genproto/test.pb.go
  84. +0 -16  vendor/cloud.google.com/go/firestore/internal/Makefile
  85. +0 -161  vendor/cloud.google.com/go/firestore/internal/doc-snippets.go
  86. +0 -142  vendor/cloud.google.com/go/firestore/internal/doc.template
  87. +0 -116  vendor/cloud.google.com/go/firestore/internal/snipdoc.awk
  88. +0 -177  vendor/cloud.google.com/go/firestore/options.go
  89. +0 -463  vendor/cloud.google.com/go/firestore/query.go
  90. +0 -11  vendor/cloud.google.com/go/firestore/testdata/Makefile
  91. +0 -1  vendor/cloud.google.com/go/firestore/testdata/VERSION
  92. +0 -27  vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto
  93. +0 -61  vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto
  94. +0 -13  vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto
  95. +0 -13  vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto
  96. +0 -20  vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto
  97. +0 -11  vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto
  98. +0 -40  vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto
  99. +0 -41  vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto
  100. +0 -26  vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto

+ 6 - 2
Gopkg.toml

@@ -173,9 +173,8 @@
   name = "golang.org/x/sync"
 
 [[constraint]]
-  branch = "v2"
   name = "gopkg.in/gomail.v2"
-  #version = "2.0.0"
+  branch = "v2" 
 
 [[constraint]]
   name = "gopkg.in/ini.v1"
@@ -188,3 +187,8 @@
 [[constraint]]
   branch = "v2"
   name = "gopkg.in/yaml.v2"
+
+[prune]
+  non-go = true
+  test-go = true
+  unused-packages = true
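
The `[prune]` stanza added above is what drives the mass deletion below: with these settings, dep strips non-Go files, Go test files, and packages that nothing in the project imports out of `vendor/` whenever the vendor tree is rewritten, which accounts for the 25682 removed lines in this commit. A minimal sketch of the workflow, assuming a dep release that honors `[prune]` in Gopkg.toml (v0.4.0 or later):

    # Gopkg.toml — project-wide prune rules
    [prune]
      non-go = true           # drop READMEs, Makefiles, JSON/textproto fixtures, etc.
      test-go = true          # drop *_test.go files
      unused-packages = true  # drop packages no import path reaches

    # re-vendoring applies the rules and rewrites vendor/
    dep ensure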

+ 0 - 8
vendor/cloud.google.com/go/bigquery/benchmarks/README.md

@@ -1,8 +0,0 @@
-# BigQuery Benchmark
-This directory contains benchmarks for BigQuery client.
-
-## Usage
-`go run bench.go -- <your project id> queries.json`
-
-BigQuery service caches requests so the benchmark should be run
-at least twice, disregarding the first result.

+ 0 - 85
vendor/cloud.google.com/go/bigquery/benchmarks/bench.go

@@ -1,85 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//+build ignore
-
-package main
-
-import (
-	"encoding/json"
-	"flag"
-	"io/ioutil"
-	"log"
-	"time"
-
-	"cloud.google.com/go/bigquery"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-)
-
-func main() {
-	flag.Parse()
-
-	ctx := context.Background()
-	c, err := bigquery.NewClient(ctx, flag.Arg(0))
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	var queries []string
-	if err := json.Unmarshal(queriesJSON, &queries); err != nil {
-		log.Fatal(err)
-	}
-
-	for _, q := range queries {
-		doQuery(ctx, c, q)
-	}
-}
-
-func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
-	startTime := time.Now()
-	q := c.Query(qt)
-	it, err := q.Read(ctx)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	numRows, numCols := 0, 0
-	var firstByte time.Duration
-
-	for {
-		var values []bigquery.Value
-		err := it.Next(&values)
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			log.Fatal(err)
-		}
-		if numRows == 0 {
-			numCols = len(values)
-			firstByte = time.Since(startTime)
-		} else if numCols != len(values) {
-			log.Fatalf("got %d columns, want %d", len(values), numCols)
-		}
-		numRows++
-	}
-	log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
-		qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
-}

+ 0 - 10
vendor/cloud.google.com/go/bigquery/benchmarks/queries.json

@@ -1,10 +0,0 @@
-[
-  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
-  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
-  "SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
-  "SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
-  "SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
-  "SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
-  "SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
-  "SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
-]

+ 0 - 156
vendor/cloud.google.com/go/bigquery/bigquery.go

@@ -1,156 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"fmt"
-	"io"
-	"net/http"
-	"time"
-
-	gax "github.com/googleapis/gax-go"
-
-	"cloud.google.com/go/internal"
-	"cloud.google.com/go/internal/version"
-
-	"google.golang.org/api/googleapi"
-	"google.golang.org/api/option"
-	htransport "google.golang.org/api/transport/http"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-const (
-	prodAddr  = "https://www.googleapis.com/bigquery/v2/"
-	Scope     = "https://www.googleapis.com/auth/bigquery"
-	userAgent = "gcloud-golang-bigquery/20160429"
-)
-
-var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
-
-func setClientHeader(headers http.Header) {
-	headers.Set("x-goog-api-client", xGoogHeader)
-}
-
-// Client may be used to perform BigQuery operations.
-type Client struct {
-	projectID string
-	bqs       *bq.Service
-}
-
-// NewClient constructs a new Client which can perform BigQuery operations.
-// Operations performed via the client are billed to the specified GCP project.
-func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
-	o := []option.ClientOption{
-		option.WithEndpoint(prodAddr),
-		option.WithScopes(Scope),
-		option.WithUserAgent(userAgent),
-	}
-	o = append(o, opts...)
-	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("bigquery: dialing: %v", err)
-	}
-	bqs, err := bq.New(httpClient)
-	if err != nil {
-		return nil, fmt.Errorf("bigquery: constructing client: %v", err)
-	}
-	bqs.BasePath = endpoint
-	c := &Client{
-		projectID: projectID,
-		bqs:       bqs,
-	}
-	return c, nil
-}
-
-// Close closes any resources held by the client.
-// Close should be called when the client is no longer needed.
-// It need not be called at program exit.
-func (c *Client) Close() error {
-	return nil
-}
-
-// Calls the Jobs.Insert RPC and returns a Job.
-func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
-	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
-	setClientHeader(call.Header())
-	if media != nil {
-		call.Media(media)
-	}
-	var res *bq.Job
-	var err error
-	invoke := func() error {
-		res, err = call.Do()
-		return err
-	}
-	// A job with a client-generated ID can be retried; the presence of the
-	// ID makes the insert operation idempotent.
-	// We don't retry if there is media, because it is an io.Reader. We'd
-	// have to read the contents and keep it in memory, and that could be expensive.
-	// TODO(jba): Look into retrying if media != nil.
-	if job.JobReference != nil && media == nil {
-		err = runWithRetry(ctx, invoke)
-	} else {
-		err = invoke()
-	}
-	if err != nil {
-		return nil, err
-	}
-	return bqToJob(res, c)
-}
-
-// Convert a number of milliseconds since the Unix epoch to a time.Time.
-// Treat an input of zero specially: convert it to the zero time,
-// rather than the start of the epoch.
-func unixMillisToTime(m int64) time.Time {
-	if m == 0 {
-		return time.Time{}
-	}
-	return time.Unix(0, m*1e6)
-}
-
-// runWithRetry calls the function until it returns nil or a non-retryable error, or
-// the context is done.
-// See the similar function in ../storage/invoke.go. The main difference is the
-// reason for retrying.
-func runWithRetry(ctx context.Context, call func() error) error {
-	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
-	backoff := gax.Backoff{
-		Initial:    1 * time.Second,
-		Max:        32 * time.Second,
-		Multiplier: 2,
-	}
-	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
-		err = call()
-		if err == nil {
-			return true, nil
-		}
-		return !retryableError(err), err
-	})
-}
-
-// This is the correct definition of retryable according to the BigQuery team.
-func retryableError(err error) bool {
-	e, ok := err.(*googleapi.Error)
-	if !ok {
-		return false
-	}
-	var reason string
-	if len(e.Errors) > 0 {
-		reason = e.Errors[0].Reason
-	}
-	return reason == "backendError" || reason == "rateLimitExceeded"
-}

+ 0 - 101
vendor/cloud.google.com/go/bigquery/copy.go

@@ -1,101 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// CopyConfig holds the configuration for a copy job.
-type CopyConfig struct {
-	// Srcs are the tables from which data will be copied.
-	Srcs []*Table
-
-	// Dst is the table into which the data will be copied.
-	Dst *Table
-
-	// CreateDisposition specifies the circumstances under which the destination table will be created.
-	// The default is CreateIfNeeded.
-	CreateDisposition TableCreateDisposition
-
-	// WriteDisposition specifies how existing data in the destination table is treated.
-	// The default is WriteEmpty.
-	WriteDisposition TableWriteDisposition
-
-	// The labels associated with this job.
-	Labels map[string]string
-}
-
-func (c *CopyConfig) toBQ() *bq.JobConfiguration {
-	var ts []*bq.TableReference
-	for _, t := range c.Srcs {
-		ts = append(ts, t.toBQ())
-	}
-	return &bq.JobConfiguration{
-		Labels: c.Labels,
-		Copy: &bq.JobConfigurationTableCopy{
-			CreateDisposition: string(c.CreateDisposition),
-			WriteDisposition:  string(c.WriteDisposition),
-			DestinationTable:  c.Dst.toBQ(),
-			SourceTables:      ts,
-		},
-	}
-}
-
-func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
-	cc := &CopyConfig{
-		Labels:            q.Labels,
-		CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
-		WriteDisposition:  TableWriteDisposition(q.Copy.WriteDisposition),
-		Dst:               bqToTable(q.Copy.DestinationTable, c),
-	}
-	for _, t := range q.Copy.SourceTables {
-		cc.Srcs = append(cc.Srcs, bqToTable(t, c))
-	}
-	return cc
-}
-
-// A Copier copies data into a BigQuery table from one or more BigQuery tables.
-type Copier struct {
-	JobIDConfig
-	CopyConfig
-	c *Client
-}
-
-// CopierFrom returns a Copier which can be used to copy data into a
-// BigQuery table from one or more BigQuery tables.
-// The returned Copier may optionally be further configured before its Run method is called.
-func (t *Table) CopierFrom(srcs ...*Table) *Copier {
-	return &Copier{
-		c: t.c,
-		CopyConfig: CopyConfig{
-			Srcs: srcs,
-			Dst:  t,
-		},
-	}
-}
-
-// Run initiates a copy job.
-func (c *Copier) Run(ctx context.Context) (*Job, error) {
-	return c.c.insertJob(ctx, c.newJob(), nil)
-}
-
-func (c *Copier) newJob() *bq.Job {
-	return &bq.Job{
-		JobReference:  c.JobIDConfig.createJobRef(c.c.projectID),
-		Configuration: c.CopyConfig.toBQ(),
-	}
-}

+ 0 - 501
vendor/cloud.google.com/go/bigquery/dataset.go

@@ -1,501 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	"cloud.google.com/go/internal/optional"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-	"google.golang.org/api/iterator"
-)
-
-// Dataset is a reference to a BigQuery dataset.
-type Dataset struct {
-	ProjectID string
-	DatasetID string
-	c         *Client
-}
-
-// DatasetMetadata contains information about a BigQuery dataset.
-type DatasetMetadata struct {
-	// These fields can be set when creating a dataset.
-	Name                   string            // The user-friendly name for this dataset.
-	Description            string            // The user-friendly description of this dataset.
-	Location               string            // The geo location of the dataset.
-	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
-	Labels                 map[string]string // User-provided labels.
-	Access                 []*AccessEntry    // Access permissions.
-
-	// These fields are read-only.
-	CreationTime     time.Time
-	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
-	FullID           string    // The full dataset ID in the form projectID:datasetID.
-
-	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
-	// ensure that the metadata hasn't changed since it was read.
-	ETag string
-}
-
-// DatasetMetadataToUpdate is used when updating a dataset's metadata.
-// Only non-nil fields will be updated.
-type DatasetMetadataToUpdate struct {
-	Description optional.String // The user-friendly description of this table.
-	Name        optional.String // The user-friendly name for this dataset.
-
-	// DefaultTableExpiration is the default expiration time for new tables.
-	// If set to time.Duration(0), new tables never expire.
-	DefaultTableExpiration optional.Duration
-
-	// The entire access list. It is not possible to replace individual entries.
-	Access []*AccessEntry
-
-	labelUpdater
-}
-
-// Dataset creates a handle to a BigQuery dataset in the client's project.
-func (c *Client) Dataset(id string) *Dataset {
-	return c.DatasetInProject(c.projectID, id)
-}
-
-// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
-func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
-	return &Dataset{
-		ProjectID: projectID,
-		DatasetID: datasetID,
-		c:         c,
-	}
-}
-
-// Create creates a dataset in the BigQuery service. An error will be returned if the
-// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
-func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
-	ds, err := md.toBQ()
-	if err != nil {
-		return err
-	}
-	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
-	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
-	setClientHeader(call.Header())
-	_, err = call.Do()
-	return err
-}
-
-func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
-	ds := &bq.Dataset{}
-	if dm == nil {
-		return ds, nil
-	}
-	ds.FriendlyName = dm.Name
-	ds.Description = dm.Description
-	ds.Location = dm.Location
-	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
-	ds.Labels = dm.Labels
-	var err error
-	ds.Access, err = accessListToBQ(dm.Access)
-	if err != nil {
-		return nil, err
-	}
-	if !dm.CreationTime.IsZero() {
-		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
-	}
-	if !dm.LastModifiedTime.IsZero() {
-		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
-	}
-	if dm.FullID != "" {
-		return nil, errors.New("bigquery: Dataset.FullID is not writable")
-	}
-	if dm.ETag != "" {
-		return nil, errors.New("bigquery: Dataset.ETag is not writable")
-	}
-	return ds, nil
-}
-
-func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
-	var q []*bq.DatasetAccess
-	for _, e := range a {
-		a, err := e.toBQ()
-		if err != nil {
-			return nil, err
-		}
-		q = append(q, a)
-	}
-	return q, nil
-}
-
-// Delete deletes the dataset.
-func (d *Dataset) Delete(ctx context.Context) error {
-	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
-	setClientHeader(call.Header())
-	return call.Do()
-}
-
-// Metadata fetches the metadata for the dataset.
-func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
-	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
-	setClientHeader(call.Header())
-	var ds *bq.Dataset
-	if err := runWithRetry(ctx, func() (err error) {
-		ds, err = call.Do()
-		return err
-	}); err != nil {
-		return nil, err
-	}
-	return bqToDatasetMetadata(ds)
-}
-
-func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
-	dm := &DatasetMetadata{
-		CreationTime:           unixMillisToTime(d.CreationTime),
-		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
-		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
-		Description:            d.Description,
-		Name:                   d.FriendlyName,
-		FullID:                 d.Id,
-		Location:               d.Location,
-		Labels:                 d.Labels,
-		ETag:                   d.Etag,
-	}
-	for _, a := range d.Access {
-		e, err := bqToAccessEntry(a, nil)
-		if err != nil {
-			return nil, err
-		}
-		dm.Access = append(dm.Access, e)
-	}
-	return dm, nil
-}
-
-// Update modifies specific Dataset metadata fields.
-// To perform a read-modify-write that protects against intervening reads,
-// set the etag argument to the DatasetMetadata.ETag field from the read.
-// Pass the empty string for etag for a "blind write" that will always succeed.
-func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
-	ds, err := dm.toBQ()
-	if err != nil {
-		return nil, err
-	}
-	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
-	setClientHeader(call.Header())
-	if etag != "" {
-		call.Header().Set("If-Match", etag)
-	}
-	var ds2 *bq.Dataset
-	if err := runWithRetry(ctx, func() (err error) {
-		ds2, err = call.Do()
-		return err
-	}); err != nil {
-		return nil, err
-	}
-	return bqToDatasetMetadata(ds2)
-}
-
-func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
-	ds := &bq.Dataset{}
-	forceSend := func(field string) {
-		ds.ForceSendFields = append(ds.ForceSendFields, field)
-	}
-
-	if dm.Description != nil {
-		ds.Description = optional.ToString(dm.Description)
-		forceSend("Description")
-	}
-	if dm.Name != nil {
-		ds.FriendlyName = optional.ToString(dm.Name)
-		forceSend("FriendlyName")
-	}
-	if dm.DefaultTableExpiration != nil {
-		dur := optional.ToDuration(dm.DefaultTableExpiration)
-		if dur == 0 {
-			// Send a null to delete the field.
-			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
-		} else {
-			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
-		}
-	}
-	if dm.Access != nil {
-		var err error
-		ds.Access, err = accessListToBQ(dm.Access)
-		if err != nil {
-			return nil, err
-		}
-		if len(ds.Access) == 0 {
-			ds.NullFields = append(ds.NullFields, "Access")
-		}
-	}
-	labels, forces, nulls := dm.update()
-	ds.Labels = labels
-	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
-	ds.NullFields = append(ds.NullFields, nulls...)
-	return ds, nil
-}
-
-// Table creates a handle to a BigQuery table in the dataset.
-// To determine if a table exists, call Table.Metadata.
-// If the table does not already exist, use Table.Create to create it.
-func (d *Dataset) Table(tableID string) *Table {
-	return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
-}
-
-// Tables returns an iterator over the tables in the Dataset.
-func (d *Dataset) Tables(ctx context.Context) *TableIterator {
-	it := &TableIterator{
-		ctx:     ctx,
-		dataset: d,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.tables) },
-		func() interface{} { b := it.tables; it.tables = nil; return b })
-	return it
-}
-
-// A TableIterator is an iterator over Tables.
-type TableIterator struct {
-	ctx      context.Context
-	dataset  *Dataset
-	tables   []*Table
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-}
-
-// Next returns the next result. Its second return value is Done if there are
-// no more results. Once Next returns Done, all subsequent calls will return
-// Done.
-func (it *TableIterator) Next() (*Table, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	t := it.tables[0]
-	it.tables = it.tables[1:]
-	return t, nil
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-// for testing
-var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
-	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
-		PageToken(pageToken).
-		Context(it.ctx)
-	setClientHeader(call.Header())
-	if pageSize > 0 {
-		call.MaxResults(int64(pageSize))
-	}
-	var res *bq.TableList
-	err := runWithRetry(it.ctx, func() (err error) {
-		res, err = call.Do()
-		return err
-	})
-	return res, err
-}
-
-func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
-	res, err := listTables(it, pageSize, pageToken)
-	if err != nil {
-		return "", err
-	}
-	for _, t := range res.Tables {
-		it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
-	}
-	return res.NextPageToken, nil
-}
-
-func bqToTable(tr *bq.TableReference, c *Client) *Table {
-	return &Table{
-		ProjectID: tr.ProjectId,
-		DatasetID: tr.DatasetId,
-		TableID:   tr.TableId,
-		c:         c,
-	}
-}
-
-// Datasets returns an iterator over the datasets in a project.
-// The Client's project is used by default, but that can be
-// changed by setting ProjectID on the returned iterator before calling Next.
-func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
-	return c.DatasetsInProject(ctx, c.projectID)
-}
-
-// DatasetsInProject returns an iterator over the datasets in the provided project.
-//
-// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
-func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
-	it := &DatasetIterator{
-		ctx:       ctx,
-		c:         c,
-		ProjectID: projectID,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.items) },
-		func() interface{} { b := it.items; it.items = nil; return b })
-	return it
-}
-
-// DatasetIterator iterates over the datasets in a project.
-type DatasetIterator struct {
-	// ListHidden causes hidden datasets to be listed when set to true.
-	// Set before the first call to Next.
-	ListHidden bool
-
-	// Filter restricts the datasets returned by label. The filter syntax is described in
-	// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
-	// Set before the first call to Next.
-	Filter string
-
-	// The project ID of the listed datasets.
-	// Set before the first call to Next.
-	ProjectID string
-
-	ctx      context.Context
-	c        *Client
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-	items    []*Dataset
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-func (it *DatasetIterator) Next() (*Dataset, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	item := it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-// for testing
-var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
-	call := it.c.bqs.Datasets.List(it.ProjectID).
-		Context(it.ctx).
-		PageToken(pageToken).
-		All(it.ListHidden)
-	setClientHeader(call.Header())
-	if pageSize > 0 {
-		call.MaxResults(int64(pageSize))
-	}
-	if it.Filter != "" {
-		call.Filter(it.Filter)
-	}
-	var res *bq.DatasetList
-	err := runWithRetry(it.ctx, func() (err error) {
-		res, err = call.Do()
-		return err
-	})
-	return res, err
-}
-
-func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
-	res, err := listDatasets(it, pageSize, pageToken)
-	if err != nil {
-		return "", err
-	}
-	for _, d := range res.Datasets {
-		it.items = append(it.items, &Dataset{
-			ProjectID: d.DatasetReference.ProjectId,
-			DatasetID: d.DatasetReference.DatasetId,
-			c:         it.c,
-		})
-	}
-	return res.NextPageToken, nil
-}
-
-// An AccessEntry describes the permissions that an entity has on a dataset.
-type AccessEntry struct {
-	Role       AccessRole // The role of the entity
-	EntityType EntityType // The type of entity
-	Entity     string     // The entity (individual or group) granted access
-	View       *Table     // The view granted access (EntityType must be ViewEntity)
-}
-
-// AccessRole is the level of access to grant to a dataset.
-type AccessRole string
-
-const (
-	OwnerRole  AccessRole = "OWNER"
-	ReaderRole AccessRole = "READER"
-	WriterRole AccessRole = "WRITER"
-)
-
-// EntityType is the type of entity in an AccessEntry.
-type EntityType int
-
-const (
-	// A domain (e.g. "example.com")
-	DomainEntity EntityType = iota + 1
-
-	// Email address of a Google Group
-	GroupEmailEntity
-
-	// Email address of an individual user.
-	UserEmailEntity
-
-	// A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
-	SpecialGroupEntity
-
-	// A BigQuery view.
-	ViewEntity
-)
-
-func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
-	q := &bq.DatasetAccess{Role: string(e.Role)}
-	switch e.EntityType {
-	case DomainEntity:
-		q.Domain = e.Entity
-	case GroupEmailEntity:
-		q.GroupByEmail = e.Entity
-	case UserEmailEntity:
-		q.UserByEmail = e.Entity
-	case SpecialGroupEntity:
-		q.SpecialGroup = e.Entity
-	case ViewEntity:
-		q.View = e.View.toBQ()
-	default:
-		return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
-	}
-	return q, nil
-}
-
-func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
-	e := &AccessEntry{Role: AccessRole(q.Role)}
-	switch {
-	case q.Domain != "":
-		e.Entity = q.Domain
-		e.EntityType = DomainEntity
-	case q.GroupByEmail != "":
-		e.Entity = q.GroupByEmail
-		e.EntityType = GroupEmailEntity
-	case q.UserByEmail != "":
-		e.Entity = q.UserByEmail
-		e.EntityType = UserEmailEntity
-	case q.SpecialGroup != "":
-		e.Entity = q.SpecialGroup
-		e.EntityType = SpecialGroupEntity
-	case q.View != nil:
-		e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
-		e.EntityType = ViewEntity
-	default:
-		return nil, errors.New("bigquery: invalid access value")
-	}
-	return e, nil
-}

+ 0 - 689
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go

@@ -1,689 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package datatransfer
-
-import (
-	"math"
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
-	GetDataSource        []gax.CallOption
-	ListDataSources      []gax.CallOption
-	CreateTransferConfig []gax.CallOption
-	UpdateTransferConfig []gax.CallOption
-	DeleteTransferConfig []gax.CallOption
-	GetTransferConfig    []gax.CallOption
-	ListTransferConfigs  []gax.CallOption
-	ScheduleTransferRuns []gax.CallOption
-	GetTransferRun       []gax.CallOption
-	DeleteTransferRun    []gax.CallOption
-	ListTransferRuns     []gax.CallOption
-	ListTransferLogs     []gax.CallOption
-	CheckValidCreds      []gax.CallOption
-}
-
-func defaultClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &CallOptions{
-		GetDataSource:        retry[[2]string{"default", "idempotent"}],
-		ListDataSources:      retry[[2]string{"default", "idempotent"}],
-		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
-		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
-		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
-		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
-		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
-		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
-		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
-		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
-		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
-		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
-		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Client is a client for interacting with BigQuery Data Transfer API.
-type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	client datatransferpb.DataTransferServiceClient
-
-	// The call options for this service.
-	CallOptions *CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClient creates a new data transfer service client.
-//
-// The Google BigQuery Data Transfer Service API enables BigQuery users to
-// configure the transfer of their data from other Google Products into BigQuery.
-// This service contains methods that are end user exposed. It backs up the
-// frontend.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
-
-		client: datatransferpb.NewDataTransferServiceClient(conn),
-	}
-	c.setGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ProjectPath returns the path for the project resource.
-func ProjectPath(project string) string {
-	return "" +
-		"projects/" +
-		project +
-		""
-}
-
-// LocationPath returns the path for the location resource.
-func LocationPath(project, location string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/locations/" +
-		location +
-		""
-}
-
-// LocationDataSourcePath returns the path for the location data source resource.
-func LocationDataSourcePath(project, location, dataSource string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/locations/" +
-		location +
-		"/dataSources/" +
-		dataSource +
-		""
-}
-
-// LocationTransferConfigPath returns the path for the location transfer config resource.
-func LocationTransferConfigPath(project, location, transferConfig string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/locations/" +
-		location +
-		"/transferConfigs/" +
-		transferConfig +
-		""
-}
-
-// LocationRunPath returns the path for the location run resource.
-func LocationRunPath(project, location, transferConfig, run string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/locations/" +
-		location +
-		"/transferConfigs/" +
-		transferConfig +
-		"/runs/" +
-		run +
-		""
-}
-
-// DataSourcePath returns the path for the data source resource.
-func DataSourcePath(project, dataSource string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/dataSources/" +
-		dataSource +
-		""
-}
-
-// TransferConfigPath returns the path for the transfer config resource.
-func TransferConfigPath(project, transferConfig string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/transferConfigs/" +
-		transferConfig +
-		""
-}
-
-// RunPath returns the path for the run resource.
-func RunPath(project, transferConfig, run string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/transferConfigs/" +
-		transferConfig +
-		"/runs/" +
-		run +
-		""
-}
-
-// GetDataSource retrieves a supported data source and returns its settings,
-// which can be used for UI rendering.
-func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
-	var resp *datatransferpb.DataSource
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListDataSources lists supported data sources and returns their settings,
-// which can be used for UI rendering.
-func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
-	it := &DataSourceIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
-		var resp *datatransferpb.ListDataSourcesResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.DataSources, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// CreateTransferConfig creates a new data transfer configuration.
-func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
-	var resp *datatransferpb.TransferConfig
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateTransferConfig updates a data transfer configuration.
-// All fields must be set, even if they are not updated.
-func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
-	var resp *datatransferpb.TransferConfig
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteTransferConfig deletes a data transfer configuration,
-// including any associated transfer runs and logs.
-func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// GetTransferConfig returns information about a data transfer config.
-func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
-	var resp *datatransferpb.TransferConfig
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListTransferConfigs returns information about all data transfers in the project.
-func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
-	it := &TransferConfigIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
-		var resp *datatransferpb.ListTransferConfigsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.TransferConfigs, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
-// For each date - or whatever granularity the data source supports - in the
-// range, one transfer run is created.
-// Note that runs are created per UTC time in the time range.
-func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
-	var resp *datatransferpb.ScheduleTransferRunsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetTransferRun returns information about the particular transfer run.
-func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
-	var resp *datatransferpb.TransferRun
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteTransferRun deletes the specified transfer run.
-func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// ListTransferRuns returns information about running and completed jobs.
-func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
-	it := &TransferRunIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
-		var resp *datatransferpb.ListTransferRunsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.TransferRuns, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// ListTransferLogs returns user facing log messages for the data transfer run.
-func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
-	it := &TransferMessageIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
-		var resp *datatransferpb.ListTransferLogsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.TransferMessages, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// CheckValidCreds returns true if valid credentials exist for the given data source and
-// requesting user.
-// Some data sources doesn't support service account, so we need to talk to
-// them on behalf of the end user. This API just checks whether we have OAuth
-// token for the particular user, which is a pre-requisite before user can
-// create a transfer config.
-func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
-	var resp *datatransferpb.CheckValidCredsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DataSourceIterator manages a stream of *datatransferpb.DataSource.
-type DataSourceIterator struct {
-	items    []*datatransferpb.DataSource
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
-	var item *datatransferpb.DataSource
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *DataSourceIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *DataSourceIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
-type TransferConfigIterator struct {
-	items    []*datatransferpb.TransferConfig
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
-	var item *datatransferpb.TransferConfig
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *TransferConfigIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *TransferConfigIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
-type TransferMessageIterator struct {
-	items    []*datatransferpb.TransferMessage
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
-	var item *datatransferpb.TransferMessage
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *TransferMessageIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *TransferMessageIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
-type TransferRunIterator struct {
-	items    []*datatransferpb.TransferRun
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
-	var item *datatransferpb.TransferRun
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *TransferRunIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *TransferRunIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}

+ 0 - 49
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go

@@ -1,49 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package datatransfer is an auto-generated package for the
-// BigQuery Data Transfer API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// Transfers data from partner SaaS applications to Google BigQuery on a
-// scheduled, managed basis.
-package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/bigquery",
-		"https://www.googleapis.com/auth/cloud-platform",
-		"https://www.googleapis.com/auth/cloud-platform.read-only",
-	}
-}
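These scopes could be handed straight to the client constructor when narrowing credentials; a minimal sketch using google.golang.org/api/option (assumed to be imported as option):

    client, err := datatransfer.NewClient(ctx, option.WithScopes(datatransfer.DefaultAuthScopes()...))
    if err != nil {
        // TODO: Handle error.
    }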

+ 0 - 297
vendor/cloud.google.com/go/bigquery/doc.go

@@ -1,297 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package bigquery provides a client for the BigQuery service.
-
-Note: This package is in beta.  Some backwards-incompatible changes may occur.
-
-The following assumes a basic familiarity with BigQuery concepts.
-See https://cloud.google.com/bigquery/docs.
-
-
-Creating a Client
-
-To start working with this package, create a client:
-
-    ctx := context.Background()
-    client, err := bigquery.NewClient(ctx, projectID)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Querying
-
-To query existing tables, create a Query and call its Read method:
-
-    q := client.Query(`
-        SELECT year, SUM(number) as num
-        FROM [bigquery-public-data:usa_names.usa_1910_2013]
-        WHERE name = "William"
-        GROUP BY year
-        ORDER BY year
-    `)
-    it, err := q.Read(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Then iterate through the resulting rows. You can store a row using
-anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
-A slice is simplest:
-
-    for {
-        var values []bigquery.Value
-        err := it.Next(&values)
-        if err == iterator.Done {
-            break
-        }
-        if err != nil {
-            // TODO: Handle error.
-        }
-        fmt.Println(values)
-    }
-
-You can also use a struct whose exported fields match the query:
-
-    type Count struct {
-        Year int
-        Num  int
-    }
-    for {
-        var c Count
-        err := it.Next(&c)
-        if err == iterator.Done {
-            break
-        }
-        if err != nil {
-            // TODO: Handle error.
-        }
-        fmt.Println(c)
-    }
-
-You can also start the query running and get the results later.
-Create the query as above, but call Run instead of Read. This returns a Job,
-which represents an asynchronous operation.
-
-    job, err := q.Run(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Get the job's ID, a printable string. You can save this string to retrieve
-the results at a later time, even in another process.
-
-    jobID := job.ID()
-    fmt.Printf("The job ID is %s\n", jobID)
-
-To retrieve the job's results from the ID, first look up the Job:
-
-    job, err = client.JobFromID(ctx, jobID)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Use the Job.Read method to obtain an iterator, and loop over the rows.
-Query.Read is just a convenience method that combines Query.Run and Job.Read.
-
-    it, err = job.Read(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-    // Proceed with iteration as above.
-
-Datasets and Tables
-
-You can refer to datasets in the client's project with the Dataset method, and
-in other projects with the DatasetInProject method:
-
-    myDataset := client.Dataset("my_dataset")
-    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")
-
-These methods create references to datasets, not the datasets themselves. You can have
-a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
-create a dataset from a reference:
-
-    if err := myDataset.Create(ctx, nil); err != nil {
-        // TODO: Handle error.
-    }
-
-You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
-to an object in BigQuery that may or may not exist.
-
-    table := myDataset.Table("my_table")
-
-You can create, delete and update the metadata of tables with methods on Table.
-For instance, you could create a temporary table with:
-
-    err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
-        ExpirationTime: time.Now().Add(1*time.Hour)})
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-We'll see how to create a table with a schema in the next section.
-
-Schemas
-
-There are two ways to construct schemas with this package.
-You can build a schema by hand, like so:
-
-    schema1 := bigquery.Schema{
-        &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
-        &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
-    }
-
-Or you can infer the schema from a struct:
-
-    type student struct {
-        Name   string
-        Grades []int
-    }
-    schema2, err := bigquery.InferSchema(student{})
-    if err != nil {
-        // TODO: Handle error.
-    }
-    // schema1 and schema2 are identical.
-
-Struct inference supports tags like those of the encoding/json package,
-so you can change names, ignore fields, or mark a field as nullable (non-required):
-
-    type student2 struct {
-        Name     string `bigquery:"full_name"`
-        Grades   []int
-        Secret   string `bigquery:"-"`
-        Optional int    `bigquery:",nullable"`
-    }
-    schema3, err := bigquery.InferSchema(student2{})
-    if err != nil {
-        // TODO: Handle error.
-    }
-    // schema3 has the required field "full_name", the repeated field "Grades", and the nullable field "Optional".
-
-Having constructed a schema, you can create a table with it like so:
-
-    if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
-        // TODO: Handle error.
-    }
-
-Copying
-
-You can copy one or more tables to another table. Begin by constructing a Copier
-describing the copy. Then set any desired copy options, and finally call Run to get a Job:
-
-    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
-    copier.WriteDisposition = bigquery.WriteTruncate
-    job, err = copier.Run(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-You can chain the call to Run if you don't want to set options:
-
-    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-You can wait for your job to complete:
-
-    status, err := job.Wait(ctx)
-    if err != nil {
-        // TODO: Handle error.
-    }
-
-Job.Wait polls with exponential backoff. You can also poll yourself, if you
-wish:
-
-    for {
-        status, err := job.Status(ctx)
-        if err != nil {
-            // TODO: Handle error.
-        }
-        if status.Done() {
-            if status.Err() != nil {
-                log.Fatalf("Job failed with error %v", status.Err())
-            }
-            break
-        }
-        time.Sleep(pollInterval)
-    }
-
-Loading and Uploading
-
-There are two ways to populate a table with this package: load the data from a Google Cloud Storage
-object, or upload rows directly from your program.
-
-For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
-it as well, and call its Run method.
-
-    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
-    gcsRef.AllowJaggedRows = true
-    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
-    loader.CreateDisposition = bigquery.CreateNever
-    job, err = loader.Run(ctx)
-    // Poll the job for completion if desired, as above.
-
-To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
-Then create an Uploader, and call its Put method with a slice of values.
-
-    u := table.Uploader()
-    // Item implements the ValueSaver interface.
-    items := []*Item{
-        {Name: "n1", Size: 32.6, Count: 7},
-        {Name: "n2", Size: 4, Count: 2},
-        {Name: "n3", Size: 101.5, Count: 1},
-    }
-    if err := u.Put(ctx, items); err != nil {
-        // TODO: Handle error.
-    }
-
-You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
-to specify the schema and insert ID by hand, or just supply the struct or struct pointer
-directly and the schema will be inferred:
-
-    type Item2 struct {
-        Name  string
-        Size  float64
-        Count int
-    }
-    // Item2 does not implement ValueSaver; the schema will be inferred.
-    items2 := []*Item2{
-        {Name: "n1", Size: 32.6, Count: 7},
-        {Name: "n2", Size: 4, Count: 2},
-        {Name: "n3", Size: 101.5, Count: 1},
-    }
-    if err := u.Put(ctx, items2); err != nil {
-        // TODO: Handle error.
-    }
-
-Extracting
-
-If you've been following so far, extracting data from a BigQuery table
-into a Google Cloud Storage object will feel familiar. First create an
-Extractor, then optionally configure it, and lastly call its Run method.
-
-    extractor := table.ExtractorTo(gcsRef)
-    extractor.DisableHeader = true
-    job, err = extractor.Run(ctx)
-    // Poll the job for completion if desired, as above.
-
-Authentication
-
-See examples of authorization and authentication at
-https://godoc.org/cloud.google.com/go#pkg-examples.
-*/
-package bigquery // import "cloud.google.com/go/bigquery"

+ 0 - 82
vendor/cloud.google.com/go/bigquery/error.go

@@ -1,82 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"fmt"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// An Error contains detailed information about a failed bigquery operation.
-type Error struct {
-	// Mirrors bq.ErrorProto, but drops DebugInfo
-	Location, Message, Reason string
-}
-
-func (e Error) Error() string {
-	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
-}
-
-func bqToError(ep *bq.ErrorProto) *Error {
-	if ep == nil {
-		return nil
-	}
-	return &Error{
-		Location: ep.Location,
-		Message:  ep.Message,
-		Reason:   ep.Reason,
-	}
-}
-
-// A MultiError contains multiple related errors.
-type MultiError []error
-
-func (m MultiError) Error() string {
-	switch len(m) {
-	case 0:
-		return "(0 errors)"
-	case 1:
-		return m[0].Error()
-	case 2:
-		return m[0].Error() + " (and 1 other error)"
-	}
-	return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
-}
-
-// RowInsertionError contains all errors that occurred when attempting to insert a row.
-type RowInsertionError struct {
-	InsertID string // The InsertID associated with the affected row.
-	RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
-	Errors   MultiError
-}
-
-func (e *RowInsertionError) Error() string {
-	errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
-	return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
-}
-
-// PutMultiError contains an error for each row which was not successfully inserted
-// into a BigQuery table.
-type PutMultiError []RowInsertionError
-
-func (pme PutMultiError) Error() string {
-	plural := "s"
-	if len(pme) == 1 {
-		plural = ""
-	}
-
-	return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
-}
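PutMultiError is what Uploader.Put returns when only some rows fail; a sketch of unpacking it, assuming u is an *bigquery.Uploader and items is a slice of savable values:

    if err := u.Put(ctx, items); err != nil {
        if pme, ok := err.(bigquery.PutMultiError); ok {
            for _, rie := range pme {
                fmt.Printf("row %d (insert ID %q) failed: %v\n", rie.RowIndex, rie.InsertID, rie.Errors)
            }
        } else {
            // TODO: Handle non-insertion errors.
        }
    }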

+ 0 - 398
vendor/cloud.google.com/go/bigquery/external.go

@@ -1,398 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"encoding/base64"
-	"unicode/utf8"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// DataFormat describes the format of BigQuery table data.
-type DataFormat string
-
-// Constants describing the format of BigQuery table data.
-const (
-	CSV             DataFormat = "CSV"
-	Avro            DataFormat = "AVRO"
-	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
-	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
-	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
-	Bigtable        DataFormat = "BIGTABLE"
-)
-
-// ExternalData is a table which is stored outside of BigQuery. It is implemented by
-// *ExternalDataConfig.
-// GCSReference also implements it, for backwards compatibility.
-type ExternalData interface {
-	toBQ() bq.ExternalDataConfiguration
-}
-
-// ExternalDataConfig describes data external to BigQuery that can be used
-// in queries and to create external tables.
-type ExternalDataConfig struct {
-	// The format of the data. Required.
-	SourceFormat DataFormat
-
-	// The fully-qualified URIs that point to your
-	// data in Google Cloud. Required.
-	//
-	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
-	// and it must come after the 'bucket' name. Size limits related to load jobs
-	// apply to external data sources.
-	//
-	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to
-	// be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
-	//
-	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
-	// the '*' wildcard character is not allowed.
-	SourceURIs []string
-
-	// The schema of the data. Required for CSV and JSON; disallowed for the
-	// other formats.
-	Schema Schema
-
-	// Try to detect schema and format options automatically.
-	// Any option specified explicitly will be honored.
-	AutoDetect bool
-
-	// The compression type of the data.
-	Compression Compression
-
-	// IgnoreUnknownValues causes values not matching the schema to be
-	// tolerated. Unknown values are ignored. For CSV this ignores extra values
-	// at the end of a line. For JSON this ignores named values that do not
-	// match any column name. If this field is not set, records containing
-	// unknown values are treated as bad records. The MaxBadRecords field can
-	// be used to customize how bad records are handled.
-	IgnoreUnknownValues bool
-
-	// MaxBadRecords is the maximum number of bad records that will be ignored
-	// when reading data.
-	MaxBadRecords int64
-
-	// Additional options for CSV, GoogleSheets and Bigtable formats.
-	Options ExternalDataConfigOptions
-}
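Put together, a typical external CSV definition looked like the sketch below (bucket and object names are assumptions); such a config could then back an external table or be referenced from a query's table definitions:

    edc := &bigquery.ExternalDataConfig{
        SourceFormat: bigquery.CSV,
        SourceURIs:   []string{"gs://my-bucket/data-*.csv"},
        AutoDetect:   true,
        Options:      &bigquery.CSVOptions{SkipLeadingRows: 1},
    }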
-
-func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
-	q := bq.ExternalDataConfiguration{
-		SourceFormat:        string(e.SourceFormat),
-		SourceUris:          e.SourceURIs,
-		Autodetect:          e.AutoDetect,
-		Compression:         string(e.Compression),
-		IgnoreUnknownValues: e.IgnoreUnknownValues,
-		MaxBadRecords:       e.MaxBadRecords,
-	}
-	if e.Schema != nil {
-		q.Schema = e.Schema.toBQ()
-	}
-	if e.Options != nil {
-		e.Options.populateExternalDataConfig(&q)
-	}
-	return q
-}
-
-func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
-	e := &ExternalDataConfig{
-		SourceFormat:        DataFormat(q.SourceFormat),
-		SourceURIs:          q.SourceUris,
-		AutoDetect:          q.Autodetect,
-		Compression:         Compression(q.Compression),
-		IgnoreUnknownValues: q.IgnoreUnknownValues,
-		MaxBadRecords:       q.MaxBadRecords,
-		Schema:              bqToSchema(q.Schema),
-	}
-	switch {
-	case q.CsvOptions != nil:
-		e.Options = bqToCSVOptions(q.CsvOptions)
-	case q.GoogleSheetsOptions != nil:
-		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
-	case q.BigtableOptions != nil:
-		var err error
-		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return e, nil
-}
-
-// ExternalDataConfigOptions are additional options for external data configurations.
-// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
-type ExternalDataConfigOptions interface {
-	populateExternalDataConfig(*bq.ExternalDataConfiguration)
-}
-
-// CSVOptions are additional options for CSV external data sources.
-type CSVOptions struct {
-	// AllowJaggedRows causes missing trailing optional columns to be tolerated
-	// when reading CSV data. Missing values are treated as nulls.
-	AllowJaggedRows bool
-
-	// AllowQuotedNewlines sets whether quoted data sections containing
-	// newlines are allowed when reading CSV data.
-	AllowQuotedNewlines bool
-
-	// Encoding is the character encoding of data to be read.
-	Encoding Encoding
-
-	// FieldDelimiter is the separator for fields in a CSV file, used when
-	// reading or exporting data. The default is ",".
-	FieldDelimiter string
-
-	// Quote is the value used to quote data sections in a CSV file. The
-	// default quotation character is the double quote ("), which is used if
-	// both Quote and ForceZeroQuote are unset.
-	// To specify that no character should be interpreted as a quotation
-	// character, set ForceZeroQuote to true.
-	// Only used when reading data.
-	Quote          string
-	ForceZeroQuote bool
-
-	// The number of rows at the top of a CSV file that BigQuery will skip when
-	// reading data.
-	SkipLeadingRows int64
-}
-
-func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
-	c.CsvOptions = &bq.CsvOptions{
-		AllowJaggedRows:     o.AllowJaggedRows,
-		AllowQuotedNewlines: o.AllowQuotedNewlines,
-		Encoding:            string(o.Encoding),
-		FieldDelimiter:      o.FieldDelimiter,
-		Quote:               o.quote(),
-		SkipLeadingRows:     o.SkipLeadingRows,
-	}
-}
-
-// quote returns the CSV quote character, or nil if unset.
-func (o *CSVOptions) quote() *string {
-	if o.ForceZeroQuote {
-		quote := ""
-		return &quote
-	}
-	if o.Quote == "" {
-		return nil
-	}
-	return &o.Quote
-}
-
-func (o *CSVOptions) setQuote(ps *string) {
-	if ps != nil {
-		o.Quote = *ps
-		if o.Quote == "" {
-			o.ForceZeroQuote = true
-		}
-	}
-}
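Between them, quote and setQuote encode three wire states in the two exported fields. A standalone restatement of that mapping (quoteFor is a hypothetical helper, not part of the package):

    // nil   -> server default quote (")
    // ""    -> explicitly no quote character (ForceZeroQuote)
    // other -> custom quote character
    func quoteFor(o bigquery.CSVOptions) *string {
        if o.ForceZeroQuote {
            s := ""
            return &s
        }
        if o.Quote == "" {
            return nil
        }
        return &o.Quote
    }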
-
-func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
-	o := &CSVOptions{
-		AllowJaggedRows:     q.AllowJaggedRows,
-		AllowQuotedNewlines: q.AllowQuotedNewlines,
-		Encoding:            Encoding(q.Encoding),
-		FieldDelimiter:      q.FieldDelimiter,
-		SkipLeadingRows:     q.SkipLeadingRows,
-	}
-	o.setQuote(q.Quote)
-	return o
-}
-
-// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
-type GoogleSheetsOptions struct {
-	// The number of rows at the top of a sheet that BigQuery will skip when
-	// reading data.
-	SkipLeadingRows int64
-}
-
-func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
-	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
-		SkipLeadingRows: o.SkipLeadingRows,
-	}
-}
-
-func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
-	return &GoogleSheetsOptions{
-		SkipLeadingRows: q.SkipLeadingRows,
-	}
-}
-
-// BigtableOptions are additional options for Bigtable external data sources.
-type BigtableOptions struct {
-	// A list of column families to expose in the table schema along with their
-	// types. If omitted, all column families are present in the table schema and
-	// their values are read as BYTES.
-	ColumnFamilies []*BigtableColumnFamily
-
-	// If true, then the column families that are not specified in columnFamilies
-	// list are not exposed in the table schema. Otherwise, they are read with BYTES
-	// type values. The default is false.
-	IgnoreUnspecifiedColumnFamilies bool
-
-	// If true, then the rowkey column families will be read and converted to string.
-	// Otherwise they are read with BYTES type values and users need to manually cast
-	// them with CAST if necessary. The default is false.
-	ReadRowkeyAsString bool
-}
-
-func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
-	q := &bq.BigtableOptions{
-		IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
-		ReadRowkeyAsString:              o.ReadRowkeyAsString,
-	}
-	for _, f := range o.ColumnFamilies {
-		q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
-	}
-	c.BigtableOptions = q
-}
-
-func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
-	b := &BigtableOptions{
-		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
-		ReadRowkeyAsString:              q.ReadRowkeyAsString,
-	}
-	for _, f := range q.ColumnFamilies {
-		f2, err := bqToBigtableColumnFamily(f)
-		if err != nil {
-			return nil, err
-		}
-		b.ColumnFamilies = append(b.ColumnFamilies, f2)
-	}
-	return b, nil
-}
-
-// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
-type BigtableColumnFamily struct {
-	// Identifier of the column family.
-	FamilyID string
-
-	// Lists of columns that should be exposed as individual fields as opposed to a
-	// list of (column name, value) pairs. All columns whose qualifier matches a
-	// qualifier in this list can be accessed as named fields. Other columns can be
-	// accessed as a list through the Column field.
-	Columns []*BigtableColumn
-
-	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
-	// - TEXT - indicates values are alphanumeric text strings.
-	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
-	// This can be overridden for a specific column by listing that column in 'columns' and
-	// specifying an encoding for it.
-	Encoding string
-
-	// If true, only the latest version of values are exposed for all columns in this
-	// column family. This can be overridden for a specific column by listing that
-	// column in 'columns' and specifying a different setting for that column.
-	OnlyReadLatest bool
-
-	// The type to convert the value in cells of this
-	// column family. The values are expected to be encoded using HBase
-	// Bytes.toBytes function when using the BINARY encoding value.
-	// The following BigQuery types are allowed (case-sensitive):
-	// BYTES STRING INTEGER FLOAT BOOLEAN.
-	// The default type is BYTES. This can be overridden for a specific column by
-	// listing that column in 'columns' and specifying a type for it.
-	Type string
-}
-
-func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
-	q := &bq.BigtableColumnFamily{
-		FamilyId:       b.FamilyID,
-		Encoding:       b.Encoding,
-		OnlyReadLatest: b.OnlyReadLatest,
-		Type:           b.Type,
-	}
-	for _, col := range b.Columns {
-		q.Columns = append(q.Columns, col.toBQ())
-	}
-	return q
-}
-
-func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
-	b := &BigtableColumnFamily{
-		FamilyID:       q.FamilyId,
-		Encoding:       q.Encoding,
-		OnlyReadLatest: q.OnlyReadLatest,
-		Type:           q.Type,
-	}
-	for _, col := range q.Columns {
-		c, err := bqToBigtableColumn(col)
-		if err != nil {
-			return nil, err
-		}
-		b.Columns = append(b.Columns, c)
-	}
-	return b, nil
-}
-
-// BigtableColumn describes how BigQuery should access a Bigtable column.
-type BigtableColumn struct {
-	// Qualifier of the column. Columns in the parent column family that have this
-	// exact qualifier are exposed as a named field. The column field name is the
-	// same as the column qualifier.
-	Qualifier string
-
-	// If the qualifier is not a valid BigQuery field identifier, i.e. does not match
-	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
-	// name and is used as the field name in queries.
-	FieldName string
-
-	// If true, only the latest version of values are exposed for this column.
-	// See BigtableColumnFamily.OnlyReadLatest.
-	OnlyReadLatest bool
-
-	// The encoding of the values when the type is not STRING.
-	// See BigtableColumnFamily.Encoding
-	Encoding string
-
-	// The type to convert the value in cells of this column.
-	// See BigtableColumnFamily.Type
-	Type string
-}
-
-func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
-	q := &bq.BigtableColumn{
-		FieldName:      b.FieldName,
-		OnlyReadLatest: b.OnlyReadLatest,
-		Encoding:       b.Encoding,
-		Type:           b.Type,
-	}
-	if utf8.ValidString(b.Qualifier) {
-		q.QualifierString = b.Qualifier
-	} else {
-		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
-	}
-	return q
-}
-
-func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
-	b := &BigtableColumn{
-		FieldName:      q.FieldName,
-		OnlyReadLatest: q.OnlyReadLatest,
-		Encoding:       q.Encoding,
-		Type:           q.Type,
-	}
-	if q.QualifierString != "" {
-		b.Qualifier = q.QualifierString
-	} else {
-		bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
-		if err != nil {
-			return nil, err
-		}
-		b.Qualifier = string(bytes)
-	}
-	return b, nil
-}

+ 0 - 105
vendor/cloud.google.com/go/bigquery/extract.go

@@ -1,105 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// ExtractConfig holds the configuration for an extract job.
-type ExtractConfig struct {
-	// Src is the table from which data will be extracted.
-	Src *Table
-
-	// Dst is the destination into which the data will be extracted.
-	Dst *GCSReference
-
-	// DisableHeader disables the printing of a header row in exported data.
-	DisableHeader bool
-
-	// The labels associated with this job.
-	Labels map[string]string
-}
-
-func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
-	var printHeader *bool
-	if e.DisableHeader {
-		f := false
-		printHeader = &f
-	}
-	return &bq.JobConfiguration{
-		Labels: e.Labels,
-		Extract: &bq.JobConfigurationExtract{
-			DestinationUris:   append([]string{}, e.Dst.URIs...),
-			Compression:       string(e.Dst.Compression),
-			DestinationFormat: string(e.Dst.DestinationFormat),
-			FieldDelimiter:    e.Dst.FieldDelimiter,
-			SourceTable:       e.Src.toBQ(),
-			PrintHeader:       printHeader,
-		},
-	}
-}
-
-func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
-	qe := q.Extract
-	return &ExtractConfig{
-		Labels: q.Labels,
-		Dst: &GCSReference{
-			URIs:              qe.DestinationUris,
-			Compression:       Compression(qe.Compression),
-			DestinationFormat: DataFormat(qe.DestinationFormat),
-			FileConfig: FileConfig{
-				CSVOptions: CSVOptions{
-					FieldDelimiter: qe.FieldDelimiter,
-				},
-			},
-		},
-		DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
-		Src:           bqToTable(qe.SourceTable, c),
-	}
-}
-
-// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
-type Extractor struct {
-	JobIDConfig
-	ExtractConfig
-	c *Client
-}
-
-// ExtractorTo returns an Extractor which can be used to extract data from a
-// BigQuery table into Google Cloud Storage.
-// The returned Extractor may optionally be further configured before its Run method is called.
-func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
-	return &Extractor{
-		c: t.c,
-		ExtractConfig: ExtractConfig{
-			Src: t,
-			Dst: dst,
-		},
-	}
-}
-
-// Run initiates an extract job.
-func (e *Extractor) Run(ctx context.Context) (*Job, error) {
-	return e.c.insertJob(ctx, e.newJob(), nil)
-}
-
-func (e *Extractor) newJob() *bq.Job {
-	return &bq.Job{
-		JobReference:  e.JobIDConfig.createJobRef(e.c.projectID),
-		Configuration: e.ExtractConfig.toBQ(),
-	}
-}

+ 0 - 135
vendor/cloud.google.com/go/bigquery/file.go

@@ -1,135 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"io"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// A ReaderSource is a source for a load operation that gets
-// data from an io.Reader.
-//
-// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
-// its internal io.Reader will be nil, so it cannot be used for a
-// subsequent load operation.
-type ReaderSource struct {
-	r io.Reader
-	FileConfig
-}
-
-// NewReaderSource creates a ReaderSource from an io.Reader. You may
-// optionally configure properties on the ReaderSource that describe the
-// data being read, before passing it to Table.LoaderFrom.
-func NewReaderSource(r io.Reader) *ReaderSource {
-	return &ReaderSource{r: r}
-}
-
-func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
-	r.FileConfig.populateLoadConfig(lc)
-	return r.r
-}
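A load from a local file went through NewReaderSource; a minimal sketch (the file path and dataset/table names are assumptions):

    f, err := os.Open("data.csv")
    if err != nil {
        // TODO: Handle error.
    }
    defer f.Close()
    rs := bigquery.NewReaderSource(f)
    rs.SourceFormat = bigquery.CSV
    rs.SkipLeadingRows = 1 // promoted from the embedded FileConfig/CSVOptions
    job, err := myDataset.Table("dest").LoaderFrom(rs).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }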
-
-// FileConfig contains configuration options that pertain to files, typically
-// text files that require interpretation to be used as a BigQuery table. A
-// file may live in Google Cloud Storage (see GCSReference), or it may be
-// loaded into a table via the Table.LoaderFromReader.
-type FileConfig struct {
-	// SourceFormat is the format of the GCS data to be read.
-	// Allowed values are: CSV, Avro, JSON, DatastoreBackup.  The default is CSV.
-	SourceFormat DataFormat
-
-	// Indicates if we should automatically infer the options and
-	// schema for CSV and JSON sources.
-	AutoDetect bool
-
-	// MaxBadRecords is the maximum number of bad records that will be ignored
-	// when reading data.
-	MaxBadRecords int64
-
-	// IgnoreUnknownValues causes values not matching the schema to be
-	// tolerated. Unknown values are ignored. For CSV this ignores extra values
-	// at the end of a line. For JSON this ignores named values that do not
-	// match any column name. If this field is not set, records containing
-	// unknown values are treated as bad records. The MaxBadRecords field can
-	// be used to customize how bad records are handled.
-	IgnoreUnknownValues bool
-
-	// Schema describes the data. It is required when reading CSV or JSON data,
-	// unless the data is being loaded into a table that already exists.
-	Schema Schema
-
-	// Additional options for CSV files.
-	CSVOptions
-}
-
-func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
-	conf.SkipLeadingRows = fc.SkipLeadingRows
-	conf.SourceFormat = string(fc.SourceFormat)
-	conf.Autodetect = fc.AutoDetect
-	conf.AllowJaggedRows = fc.AllowJaggedRows
-	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
-	conf.Encoding = string(fc.Encoding)
-	conf.FieldDelimiter = fc.FieldDelimiter
-	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
-	conf.MaxBadRecords = fc.MaxBadRecords
-	if fc.Schema != nil {
-		conf.Schema = fc.Schema.toBQ()
-	}
-	conf.Quote = fc.quote()
-}
-
-func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
-	fc.SourceFormat = DataFormat(conf.SourceFormat)
-	fc.AutoDetect = conf.Autodetect
-	fc.MaxBadRecords = conf.MaxBadRecords
-	fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
-	fc.Schema = bqToSchema(conf.Schema)
-	fc.SkipLeadingRows = conf.SkipLeadingRows
-	fc.AllowJaggedRows = conf.AllowJaggedRows
-	fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
-	fc.Encoding = Encoding(conf.Encoding)
-	fc.FieldDelimiter = conf.FieldDelimiter
-	fc.CSVOptions.setQuote(conf.Quote)
-}
-
-func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
-	format := fc.SourceFormat
-	if format == "" {
-		// Format must be explicitly set for external data sources.
-		format = CSV
-	}
-	conf.Autodetect = fc.AutoDetect
-	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
-	conf.MaxBadRecords = fc.MaxBadRecords
-	conf.SourceFormat = string(format)
-	if fc.Schema != nil {
-		conf.Schema = fc.Schema.toBQ()
-	}
-	if format == CSV {
-		fc.CSVOptions.populateExternalDataConfig(conf)
-	}
-}
-
-// Encoding specifies the character encoding of data to be loaded into BigQuery.
-// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
-// for more details about how this is used.
-type Encoding string
-
-const (
-	UTF_8      Encoding = "UTF-8"
-	ISO_8859_1 Encoding = "ISO-8859-1"
-)

+ 0 - 73
vendor/cloud.google.com/go/bigquery/gcs.go

@@ -1,73 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"io"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
-// an input or output to a BigQuery operation.
-type GCSReference struct {
-	// URIs refer to Google Cloud Storage objects.
-	URIs []string
-
-	FileConfig
-
-	// DestinationFormat is the format to use when writing exported files.
-	// Allowed values are: CSV, Avro, JSON.  The default is CSV.
-	// CSV is not supported for tables with nested or repeated fields.
-	DestinationFormat DataFormat
-
-	// Compression specifies the type of compression to apply when writing data
-	// to Google Cloud Storage, or using this GCSReference as an ExternalData
-	// source with CSV or JSON SourceFormat. Default is None.
-	Compression Compression
-}
-
-// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
-// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
-// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
-// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
-// For more information about the treatment of wildcards and multiple URIs,
-// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
-func NewGCSReference(uri ...string) *GCSReference {
-	return &GCSReference{URIs: uri}
-}
-
-// Compression is the type of compression to apply when writing data to Google Cloud Storage.
-type Compression string
-
-const (
-	None Compression = "NONE"
-	Gzip Compression = "GZIP"
-)
-
-func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
-	lc.SourceUris = gcs.URIs
-	gcs.FileConfig.populateLoadConfig(lc)
-	return nil
-}
-
-func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
-	conf := bq.ExternalDataConfiguration{
-		Compression: string(gcs.Compression),
-		SourceUris:  append([]string{}, gcs.URIs...),
-	}
-	gcs.FileConfig.populateExternalDataConfig(&conf)
-	return conf
-}
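For the export direction, the same reference type carries the format and compression settings; a sketch with an assumed bucket name:

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/shard-*.json")
    gcsRef.DestinationFormat = bigquery.JSON
    gcsRef.Compression = bigquery.Gzip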

+ 0 - 206
vendor/cloud.google.com/go/bigquery/iterator.go

@@ -1,206 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"fmt"
-	"reflect"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-	"google.golang.org/api/iterator"
-)
-
-func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
-	it := &RowIterator{
-		ctx:   ctx,
-		table: t,
-		pf:    pf,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.rows) },
-		func() interface{} { r := it.rows; it.rows = nil; return r })
-	return it
-}
-
-// A RowIterator provides access to the result of a BigQuery lookup.
-type RowIterator struct {
-	ctx      context.Context
-	table    *Table
-	pf       pageFetcher
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// StartIndex can be set before the first call to Next. If PageInfo().Token
-	// is also set, StartIndex is ignored.
-	StartIndex uint64
-
-	rows [][]Value
-
-	schema       Schema       // populated on first call to fetch
-	structLoader structLoader // used to populate a pointer to a struct
-}
-
-// Next loads the next row into dst. Its return value is iterator.Done if there
-// are no more results. Once Next returns iterator.Done, all subsequent calls
-// will return iterator.Done.
-//
-// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
-//
-// If dst is a *[]Value, it will be set to a new []Value whose i'th element
-// will be populated with the i'th column of the row.
-//
-// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
-// for each schema column name, the map key of that name will be set to the column's
-// value. STRUCT types (RECORD types or nested schemas) become nested maps.
-//
-// If dst is pointer to a struct, each column in the schema will be matched
-// with an exported field of the struct that has the same name, ignoring case.
-// Unmatched schema columns and struct fields will be ignored.
-//
-// Each BigQuery column type corresponds to one or more Go types; a matching struct
-// field must be of the correct type. The correspondences are:
-//
-//   STRING      string
-//   BOOL        bool
-//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
-//   FLOAT       float32, float64
-//   BYTES       []byte
-//   TIMESTAMP   time.Time
-//   DATE        civil.Date
-//   TIME        civil.Time
-//   DATETIME    civil.DateTime
-//
-// A repeated field corresponds to a slice or array of the element type. A STRUCT
-// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
-// All calls to Next on the same iterator must use the same struct type.
-//
-// It is an error to attempt to read a BigQuery NULL value into a struct field.
-// If your table contains NULLs, use a *[]Value or *map[string]Value.
-func (it *RowIterator) Next(dst interface{}) error {
-	var vl ValueLoader
-	switch dst := dst.(type) {
-	case ValueLoader:
-		vl = dst
-	case *[]Value:
-		vl = (*valueList)(dst)
-	case *map[string]Value:
-		vl = (*valueMap)(dst)
-	default:
-		if !isStructPtr(dst) {
-			return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
-		}
-	}
-	if err := it.nextFunc(); err != nil {
-		return err
-	}
-	row := it.rows[0]
-	it.rows = it.rows[1:]
-
-	if vl == nil {
-		// This can only happen if dst is a pointer to a struct. We couldn't
-		// set vl above because we need the schema.
-		if err := it.structLoader.set(dst, it.schema); err != nil {
-			return err
-		}
-		vl = &it.structLoader
-	}
-	return vl.Load(row, it.schema)
-}
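As the doc comment above notes, NULLs cannot be read into plain struct fields; the map form is the NULL-tolerant alternative. A sketch, assuming it is a *bigquery.RowIterator and "name" is an assumed column:

    for {
        row := map[string]bigquery.Value{}
        err := it.Next(&row)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(row["name"]) // a NULL column yields a nil Value
    }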
-
-func isStructPtr(x interface{}) bool {
-	t := reflect.TypeOf(x)
-	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
-	res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
-	if err != nil {
-		return "", err
-	}
-	it.rows = append(it.rows, res.rows...)
-	it.schema = res.schema
-	return res.pageToken, nil
-}
-
-// A pageFetcher returns a page of rows from a destination table.
-type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)
-
-type fetchPageResult struct {
-	pageToken string
-	rows      [][]Value
-	totalRows uint64
-	schema    Schema
-}
-
-// fetchPage gets a page of rows from t.
-func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
-	// Fetch the table schema in the background, if necessary.
-	errc := make(chan error, 1)
-	if schema != nil {
-		errc <- nil
-	} else {
-		go func() {
-			var bqt *bq.Table
-			err := runWithRetry(ctx, func() (err error) {
-				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
-					Fields("schema").
-					Context(ctx).
-					Do()
-				return err
-			})
-			if err == nil && bqt.Schema != nil {
-				schema = bqToSchema(bqt.Schema)
-			}
-			errc <- err
-		}()
-	}
-	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
-	setClientHeader(call.Header())
-	if pageToken != "" {
-		call.PageToken(pageToken)
-	} else {
-		call.StartIndex(startIndex)
-	}
-	if pageSize > 0 {
-		call.MaxResults(pageSize)
-	}
-	var res *bq.TableDataList
-	err := runWithRetry(ctx, func() (err error) {
-		res, err = call.Context(ctx).Do()
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-	err = <-errc
-	if err != nil {
-		return nil, err
-	}
-	rows, err := convertRows(res.Rows, schema)
-	if err != nil {
-		return nil, err
-	}
-	return &fetchPageResult{
-		pageToken: res.PageToken,
-		rows:      rows,
-		totalRows: uint64(res.TotalRows),
-		schema:    schema,
-	}, nil
-}
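One consequence of the pageToken/StartIndex branch above: StartIndex only affects the first page fetched. From the caller's side, a sketch (table is an assumed *bigquery.Table):

    it := table.Read(ctx)
    it.StartIndex = 100 // skip the first 100 rows; ignored once a page token is in play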

+ 0 - 669
vendor/cloud.google.com/go/bigquery/job.go

@@ -1,669 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"os"
-	"sync"
-	"time"
-
-	"cloud.google.com/go/internal"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-	"google.golang.org/api/googleapi"
-	"google.golang.org/api/iterator"
-)
-
-// A Job represents an operation which has been submitted to BigQuery for processing.
-type Job struct {
-	c         *Client
-	projectID string
-	jobID     string
-
-	config     *bq.JobConfiguration
-	lastStatus *JobStatus
-}
-
-// JobFromID creates a Job which refers to an existing BigQuery job. The job
-// need not have been created by this package. For example, the job may have
-// been created in the BigQuery console.
-func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
-	bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
-	if err != nil {
-		return nil, err
-	}
-	return bqToJob(bqjob, c)
-}
-
-// ID returns the job's ID.
-func (j *Job) ID() string {
-	return j.jobID
-}
-
-// State is one of a sequence of states that a Job progresses through as it is processed.
-type State int
-
-const (
-	StateUnspecified State = iota // used only as a default in JobIterator
-	Pending
-	Running
-	Done
-)
-
-// JobStatus contains the current State of a job, and errors encountered while processing that job.
-type JobStatus struct {
-	State State
-
-	err error
-
-	// All errors encountered during the running of the job.
-	// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
-	Errors []*Error
-
-	// Statistics about the job.
-	Statistics *JobStatistics
-}
-
-// JobConfig contains configuration information for a job. It is implemented by
-// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
-type JobConfig interface {
-	isJobConfig()
-}
-
-func (*CopyConfig) isJobConfig()    {}
-func (*ExtractConfig) isJobConfig() {}
-func (*LoadConfig) isJobConfig()    {}
-func (*QueryConfig) isJobConfig()   {}
-
-// Config returns the configuration information for j.
-func (j *Job) Config() (JobConfig, error) {
-	return bqToJobConfig(j.config, j.c)
-}
-
-func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
-	switch {
-	case q == nil:
-		return nil, nil
-	case q.Copy != nil:
-		return bqToCopyConfig(q, c), nil
-	case q.Extract != nil:
-		return bqToExtractConfig(q, c), nil
-	case q.Load != nil:
-		return bqToLoadConfig(q, c), nil
-	case q.Query != nil:
-		return bqToQueryConfig(q, c)
-	default:
-		return nil, nil
-	}
-}
-
-// JobIDConfig describes how to create an ID for a job.
-type JobIDConfig struct {
-	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
-	JobID string
-
-	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
-	AddJobIDSuffix bool
-}
-
-// createJobRef creates a JobReference.
-// projectID must be non-empty.
-func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
-	// We don't check whether projectID is empty; the server will return an
-	// error when it encounters the resulting JobReference.
-	jr := &bq.JobReference{ProjectId: projectID}
-	if j.JobID == "" {
-		jr.JobId = randomIDFn()
-	} else if j.AddJobIDSuffix {
-		jr.JobId = j.JobID + "-" + randomIDFn()
-	} else {
-		jr.JobId = j.JobID
-	}
-	return jr
-}
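Seen from a caller, these fields allow stable, traceable job IDs while still avoiding collisions; a sketch assuming Query embeds JobIDConfig (as it does elsewhere in this package) and an illustrative ID:

    q := client.Query("SELECT 17")
    q.JobID = "daily-report"
    q.AddJobIDSuffix = true // submitted as "daily-report-<27 random chars>"
    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }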
-
-const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-
-var (
-	rngMu sync.Mutex
-	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
-)
-
-// For testing.
-var randomIDFn = randomID
-
-// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
-// suffixes.
-const randomIDLen = 27
-
-func randomID() string {
-	// This is used for both job IDs and insert IDs.
-	var b [randomIDLen]byte
-	rngMu.Lock()
-	for i := 0; i < len(b); i++ {
-		b[i] = alphanum[rng.Intn(len(alphanum))]
-	}
-	rngMu.Unlock()
-	return string(b[:])
-}
-
-// Done reports whether the job has completed.
-// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
-func (s *JobStatus) Done() bool {
-	return s.State == Done
-}
-
-// Err returns the error that caused the job to complete unsuccesfully (if any).
-func (s *JobStatus) Err() error {
-	return s.err
-}
-
-// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
-func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
-	bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
-	if err != nil {
-		return nil, err
-	}
-	if err := j.setStatus(bqjob.Status); err != nil {
-		return nil, err
-	}
-	j.setStatistics(bqjob.Statistics, j.c)
-	return j.lastStatus, nil
-}
-
-// LastStatus returns the most recently retrieved status of the job. The status is
-// retrieved when a new job is created, or when JobFromID or Job.Status is called.
-// Call Job.Status to get the most up-to-date information about a job.
-func (j *Job) LastStatus() *JobStatus {
-	return j.lastStatus
-}
-
-// Cancel requests that a job be cancelled. This method returns without waiting for
-// cancellation to take effect. To check whether the job has terminated, use Job.Status.
-// Cancelled jobs may still incur costs.
-func (j *Job) Cancel(ctx context.Context) error {
-	// Jobs.Cancel returns a job entity, but the only relevant piece of
-	// data it may contain (the status of the job) is unreliable.  From the
-	// docs: "This call will return immediately, and the client will need
-	// to poll for the job status to see if the cancel completed
-	// successfully".  So it would be misleading to return a status.
-	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
-		Fields(). // We don't need any of the response data.
-		Context(ctx)
-	setClientHeader(call.Header())
-	return runWithRetry(ctx, func() error {
-		_, err := call.Do()
-		return err
-	})
-}
-
-// Wait blocks until the job or the context is done. It returns the final status
-// of the job.
-// If an error occurs while retrieving the status, Wait returns that error. But
-// Wait returns nil if the status was retrieved successfully, even if
-// status.Err() != nil. So callers must check both errors. See the sketch below.
-func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
-	if j.isQuery() {
-		// We can avoid polling for query jobs.
-		if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
-			return nil, err
-		}
-		// Note: extra RPC even if you just want to wait for the query to finish.
-		js, err := j.Status(ctx)
-		if err != nil {
-			return nil, err
-		}
-		return js, nil
-	}
-	// Non-query jobs must poll.
-	var js *JobStatus
-	err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
-		js, err = j.Status(ctx)
-		if err != nil {
-			return true, err
-		}
-		if js.Done() {
-			return true, nil
-		}
-		return false, nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	return js, nil
-}
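The pattern the doc comment refers to boils down to checking both error channels:

    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle the error from retrieving the status.
    }
    if status.Err() != nil {
        // TODO: Handle the job's own failure.
    }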
-
-// Read fetches the results of a query job.
-// If j is not a query job, Read returns an error.
-func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
-	return j.read(ctx, j.waitForQuery, fetchPage)
-}
-
-func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
-	if !j.isQuery() {
-		return nil, errors.New("bigquery: cannot read from a non-query job")
-	}
-	destTable := j.config.Query.DestinationTable
-	// The destination table should only be nil if there was a query error.
-	if destTable == nil {
-		return nil, errors.New("bigquery: query job missing destination table")
-	}
-	projectID := destTable.ProjectId
-	schema, err := waitForQuery(ctx, projectID)
-	if err != nil {
-		return nil, err
-	}
-	dt := bqToTable(destTable, j.c)
-	it := newRowIterator(ctx, dt, pf)
-	it.schema = schema
-	return it, nil
-}
-
-// waitForQuery waits for the query job to complete and returns its schema.
-func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
-	// Use GetQueryResults only to wait for completion, not to read results.
-	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
-	setClientHeader(call.Header())
-	backoff := gax.Backoff{
-		Initial:    1 * time.Second,
-		Multiplier: 2,
-		Max:        60 * time.Second,
-	}
-	var res *bq.GetQueryResultsResponse
-	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
-		res, err = call.Do()
-		if err != nil {
-			return !retryableError(err), err
-		}
-		if !res.JobComplete { // GetQueryResults may return early without error; retry.
-			return false, nil
-		}
-		return true, nil
-	})
-	if err != nil {
-		return nil, err
-	}
-	return bqToSchema(res.Schema), nil
-}
-
-// JobStatistics contains statistics about a job.
-type JobStatistics struct {
-	CreationTime        time.Time
-	StartTime           time.Time
-	EndTime             time.Time
-	TotalBytesProcessed int64
-
-	Details Statistics
-}
-
-// Statistics is one of ExtractStatistics, LoadStatistics or QueryStatistics.
-type Statistics interface {
-	implementsStatistics()
-}
-
-// ExtractStatistics contains statistics about an extract job.
-type ExtractStatistics struct {
-	// The number of files per destination URI or URI pattern specified in the
-	// extract configuration. These values will be in the same order as the
-	// URIs specified in the 'destinationUris' field.
-	DestinationURIFileCounts []int64
-}
-
-// LoadStatistics contains statistics about a load job.
-type LoadStatistics struct {
-	// The number of bytes of source data in a load job.
-	InputFileBytes int64
-
-	// The number of source files in a load job.
-	InputFiles int64
-
-	// Size of the loaded data in bytes. Note that while a load job is in the
-	// running state, this value may change.
-	OutputBytes int64
-
-	// The number of rows imported in a load job. Note that while a load job
-	// is in the running state, this value may change.
-	OutputRows int64
-}
-
-// QueryStatistics contains statistics about a query job.
-type QueryStatistics struct {
-	// Billing tier for the job.
-	BillingTier int64
-
-	// Whether the query result was fetched from the query cache.
-	CacheHit bool
-
-	// The type of query statement, if valid.
-	StatementType string
-
-	// Total bytes billed for the job.
-	TotalBytesBilled int64
-
-	// Total bytes processed for the job.
-	TotalBytesProcessed int64
-
-	// Describes execution plan for the query.
-	QueryPlan []*ExplainQueryStage
-
-	// The number of rows affected by a DML statement. Present only for DML
-	// statements INSERT, UPDATE or DELETE.
-	NumDMLAffectedRows int64
-
-	// Referenced tables for the job. Queries that reference more than 50
-	// tables will not have a complete list.
-	ReferencedTables []*Table
-
-	// The schema of the results. Present only for successful dry run of
-	// non-legacy SQL queries.
-	Schema Schema
-
-	// Standard SQL: list of undeclared query parameter names detected during a
-	// dry run validation.
-	UndeclaredQueryParameterNames []string
-}
-
-// ExplainQueryStage describes one stage of a query.
-type ExplainQueryStage struct {
-	// Relative amount of the total time the average shard spent on CPU-bound tasks.
-	ComputeRatioAvg float64
-
-	// Relative amount of the total time the slowest shard spent on CPU-bound tasks.
-	ComputeRatioMax float64
-
-	// Unique ID for stage within plan.
-	ID int64
-
-	// Human-readable name for stage.
-	Name string
-
-	// Relative amount of the total time the average shard spent reading input.
-	ReadRatioAvg float64
-
-	// Relative amount of the total time the slowest shard spent reading input.
-	ReadRatioMax float64
-
-	// Number of records read into the stage.
-	RecordsRead int64
-
-	// Number of records written by the stage.
-	RecordsWritten int64
-
-	// Current status for the stage.
-	Status string
-
-	// List of operations within the stage in dependency order (approximately
-	// chronological).
-	Steps []*ExplainQueryStep
-
-	// Relative amount of the total time the average shard spent waiting to be scheduled.
-	WaitRatioAvg float64
-
-	// Relative amount of the total time the slowest shard spent waiting to be scheduled.
-	WaitRatioMax float64
-
-	// Relative amount of the total time the average shard spent on writing output.
-	WriteRatioAvg float64
-
-	// Relative amount of the total time the slowest shard spent on writing output.
-	WriteRatioMax float64
-}
-
-// ExplainQueryStep describes one step of a query stage.
-type ExplainQueryStep struct {
-	// Machine-readable operation type.
-	Kind string
-
-	// Human-readable descriptions of the steps within this stage.
-	Substeps []string
-}
-
-func (*ExtractStatistics) implementsStatistics() {}
-func (*LoadStatistics) implementsStatistics()    {}
-func (*QueryStatistics) implementsStatistics()   {}
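
Since Details is typed as the Statistics interface, a type switch recovers the concrete statistics. A minimal sketch, assuming status came from Job.Status or Job.Wait and has non-nil Statistics:

func describeJob(status *bigquery.JobStatus) {
	switch d := status.Statistics.Details.(type) {
	case *bigquery.QueryStatistics:
		fmt.Println("bytes billed:", d.TotalBytesBilled)
	case *bigquery.LoadStatistics:
		fmt.Println("rows loaded:", d.OutputRows)
	case *bigquery.ExtractStatistics:
		fmt.Println("files per URI:", d.DestinationURIFileCounts)
	}
}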
-
-// Jobs lists jobs within a project.
-func (c *Client) Jobs(ctx context.Context) *JobIterator {
-	it := &JobIterator{
-		ctx:       ctx,
-		c:         c,
-		ProjectID: c.projectID,
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.items) },
-		func() interface{} { b := it.items; it.items = nil; return b })
-	return it
-}
-
-// JobIterator iterates over jobs in a project.
-type JobIterator struct {
-	ProjectID string // Project ID of the jobs to list. Default is the client's project.
-	AllUsers  bool   // Whether to list jobs owned by all users in the project, or just the current caller.
-	State     State  // List only jobs in the given state. Defaults to all states.
-
-	ctx      context.Context
-	c        *Client
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-	items    []*Job
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-// Next returns the next Job. Its second return value is iterator.Done if
-// there are no more results.
-func (it *JobIterator) Next() (*Job, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	item := it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
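
A usage sketch of the iterator, following the standard google.golang.org/api/iterator loop that the PageInfo/nextFunc plumbing above implements:

func printJobStates(ctx context.Context, client *bigquery.Client) error {
	it := client.Jobs(ctx)
	it.AllUsers = true // include jobs from all users in the project
	for {
		job, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(job.LastStatus().State)
	}
}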
-
-func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
-	var st string
-	switch it.State {
-	case StateUnspecified:
-		st = ""
-	case Pending:
-		st = "pending"
-	case Running:
-		st = "running"
-	case Done:
-		st = "done"
-	default:
-		return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
-	}
-
-	req := it.c.bqs.Jobs.List(it.ProjectID).
-		Context(it.ctx).
-		PageToken(pageToken).
-		Projection("full").
-		AllUsers(it.AllUsers)
-	if st != "" {
-		req.StateFilter(st)
-	}
-	setClientHeader(req.Header())
-	if pageSize > 0 {
-		req.MaxResults(int64(pageSize))
-	}
-	res, err := req.Do()
-	if err != nil {
-		return "", err
-	}
-	for _, j := range res.Jobs {
-		job, err := convertListedJob(j, it.c)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, job)
-	}
-	return res.NextPageToken, nil
-}
-
-func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
-	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
-}
-
-func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
-	var job *bq.Job
-	call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
-	if len(fields) > 0 {
-		call = call.Fields(fields...)
-	}
-	setClientHeader(call.Header())
-	err := runWithRetry(ctx, func() (err error) {
-		job, err = call.Do()
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-	return job, nil
-}
-
-func bqToJob(q *bq.Job, c *Client) (*Job, error) {
-	return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
-}
-
-func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
-	j := &Job{
-		projectID: qr.ProjectId,
-		jobID:     qr.JobId,
-		c:         c,
-	}
-	j.setConfig(qc)
-	if err := j.setStatus(qs); err != nil {
-		return nil, err
-	}
-	j.setStatistics(qt, c)
-	return j, nil
-}
-
-func (j *Job) setConfig(config *bq.JobConfiguration) {
-	if config == nil {
-		return
-	}
-	j.config = config
-}
-
-func (j *Job) isQuery() bool {
-	return j.config != nil && j.config.Query != nil
-}
-
-var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
-
-func (j *Job) setStatus(qs *bq.JobStatus) error {
-	if qs == nil {
-		return nil
-	}
-	state, ok := stateMap[qs.State]
-	if !ok {
-		return fmt.Errorf("unexpected job state: %v", qs.State)
-	}
-	j.lastStatus = &JobStatus{
-		State: state,
-		err:   nil,
-	}
-	if err := bqToError(qs.ErrorResult); state == Done && err != nil {
-		j.lastStatus.err = err
-	}
-	for _, ep := range qs.Errors {
-		j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
-	}
-	return nil
-}
-
-func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
-	if s == nil || j.lastStatus == nil {
-		return
-	}
-	js := &JobStatistics{
-		CreationTime:        unixMillisToTime(s.CreationTime),
-		StartTime:           unixMillisToTime(s.StartTime),
-		EndTime:             unixMillisToTime(s.EndTime),
-		TotalBytesProcessed: s.TotalBytesProcessed,
-	}
-	switch {
-	case s.Extract != nil:
-		js.Details = &ExtractStatistics{
-			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
-		}
-	case s.Load != nil:
-		js.Details = &LoadStatistics{
-			InputFileBytes: s.Load.InputFileBytes,
-			InputFiles:     s.Load.InputFiles,
-			OutputBytes:    s.Load.OutputBytes,
-			OutputRows:     s.Load.OutputRows,
-		}
-	case s.Query != nil:
-		var names []string
-		for _, qp := range s.Query.UndeclaredQueryParameters {
-			names = append(names, qp.Name)
-		}
-		var tables []*Table
-		for _, tr := range s.Query.ReferencedTables {
-			tables = append(tables, bqToTable(tr, c))
-		}
-		js.Details = &QueryStatistics{
-			BillingTier:                   s.Query.BillingTier,
-			CacheHit:                      s.Query.CacheHit,
-			StatementType:                 s.Query.StatementType,
-			TotalBytesBilled:              s.Query.TotalBytesBilled,
-			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
-			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
-			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
-			Schema:                        bqToSchema(s.Query.Schema),
-			ReferencedTables:              tables,
-			UndeclaredQueryParameterNames: names,
-		}
-	}
-	j.lastStatus.Statistics = js
-}
-
-func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
-	var res []*ExplainQueryStage
-	for _, s := range stages {
-		var steps []*ExplainQueryStep
-		for _, p := range s.Steps {
-			steps = append(steps, &ExplainQueryStep{
-				Kind:     p.Kind,
-				Substeps: p.Substeps,
-			})
-		}
-		res = append(res, &ExplainQueryStage{
-			ComputeRatioAvg: s.ComputeRatioAvg,
-			ComputeRatioMax: s.ComputeRatioMax,
-			ID:              s.Id,
-			Name:            s.Name,
-			ReadRatioAvg:    s.ReadRatioAvg,
-			ReadRatioMax:    s.ReadRatioMax,
-			RecordsRead:     s.RecordsRead,
-			RecordsWritten:  s.RecordsWritten,
-			Status:          s.Status,
-			Steps:           steps,
-			WaitRatioAvg:    s.WaitRatioAvg,
-			WaitRatioMax:    s.WaitRatioMax,
-			WriteRatioAvg:   s.WriteRatioAvg,
-			WriteRatioMax:   s.WriteRatioMax,
-		})
-	}
-	return res
-}

+ 0 - 126
vendor/cloud.google.com/go/bigquery/load.go

@@ -1,126 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"io"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// LoadConfig holds the configuration for a load job.
-type LoadConfig struct {
-	// Src is the source from which data will be loaded.
-	Src LoadSource
-
-	// Dst is the table into which the data will be loaded.
-	Dst *Table
-
-	// CreateDisposition specifies the circumstances under which the destination table will be created.
-	// The default is CreateIfNeeded.
-	CreateDisposition TableCreateDisposition
-
-	// WriteDisposition specifies how existing data in the destination table is treated.
-	// The default is WriteAppend.
-	WriteDisposition TableWriteDisposition
-
-	// The labels associated with this job.
-	Labels map[string]string
-
-	// If non-nil, the destination table is partitioned by time.
-	TimePartitioning *TimePartitioning
-}
-
-func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
-	config := &bq.JobConfiguration{
-		Labels: l.Labels,
-		Load: &bq.JobConfigurationLoad{
-			CreateDisposition: string(l.CreateDisposition),
-			WriteDisposition:  string(l.WriteDisposition),
-			DestinationTable:  l.Dst.toBQ(),
-			TimePartitioning:  l.TimePartitioning.toBQ(),
-		},
-	}
-	media := l.Src.populateLoadConfig(config.Load)
-	return config, media
-}
-
-func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
-	lc := &LoadConfig{
-		Labels:            q.Labels,
-		CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
-		WriteDisposition:  TableWriteDisposition(q.Load.WriteDisposition),
-		Dst:               bqToTable(q.Load.DestinationTable, c),
-		TimePartitioning:  bqToTimePartitioning(q.Load.TimePartitioning),
-	}
-	var fc *FileConfig
-	if len(q.Load.SourceUris) == 0 {
-		s := NewReaderSource(nil)
-		fc = &s.FileConfig
-		lc.Src = s
-	} else {
-		s := NewGCSReference(q.Load.SourceUris...)
-		fc = &s.FileConfig
-		lc.Src = s
-	}
-	bqPopulateFileConfig(q.Load, fc)
-	return lc
-}
-
-// A Loader loads data into a BigQuery table from Google Cloud Storage or
-// from an io.Reader (see LoadSource).
-type Loader struct {
-	JobIDConfig
-	LoadConfig
-	c *Client
-}
-
-// A LoadSource represents a source of data that can be loaded into
-// a BigQuery table.
-//
-// This package defines two LoadSources: GCSReference, for Google Cloud Storage
-// objects, and ReaderSource, for data read from an io.Reader.
-type LoadSource interface {
-	// populateLoadConfig populates the load config and returns the media to be uploaded, if any.
-	populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
-}
-
-// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
-// The returned Loader may optionally be further configured before its Run method is called.
-// See GCSReference and ReaderSource for additional configuration options that
-// affect loading.
-func (t *Table) LoaderFrom(src LoadSource) *Loader {
-	return &Loader{
-		c: t.c,
-		LoadConfig: LoadConfig{
-			Src: src,
-			Dst: t,
-		},
-	}
-}
-
-// Run initiates a load job.
-func (l *Loader) Run(ctx context.Context) (*Job, error) {
-	job, media := l.newJob()
-	return l.c.insertJob(ctx, job, media)
-}
-
-func (l *Loader) newJob() (*bq.Job, io.Reader) {
-	config, media := l.LoadConfig.toBQ()
-	return &bq.Job{
-		JobReference:  l.JobIDConfig.createJobRef(l.c.projectID),
-		Configuration: config,
-	}, media
-}
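
A usage sketch of the load path defined above. The GCS URI and dataset/table IDs are placeholders, and the Dataset/Table handles come from dataset.go elsewhere in this vendor tree:

func loadCSV(ctx context.Context, client *bigquery.Client) error {
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/data.csv") // placeholder URI
	loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcsRef)
	loader.WriteDisposition = bigquery.WriteTruncate
	job, err := loader.Run(ctx)
	if err != nil {
		return err
	}
	status, err := job.Wait(ctx)
	if err != nil {
		return err
	}
	return status.Err() // non-nil if the load job itself failed
}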

+ 0 - 349
vendor/cloud.google.com/go/bigquery/params.go

@@ -1,349 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"reflect"
-	"regexp"
-	"strings"
-	"time"
-
-	"cloud.google.com/go/civil"
-	"cloud.google.com/go/internal/fields"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-var (
-	// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
-	timestampFormat = "2006-01-02 15:04:05.999999-07:00"
-
-	// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
-	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
-)
-
-const nullableTagOption = "nullable"
-
-func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
-	name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
-	if err != nil {
-		return "", false, nil, err
-	}
-	if name != "" && !validFieldName.MatchString(name) {
-		return "", false, nil, errInvalidFieldName
-	}
-	for _, opt := range opts {
-		if opt != nullableTagOption {
-			return "", false, nil, fmt.Errorf(
-				"bigquery: invalid tag option %q. The only valid option is %q",
-				opt, nullableTagOption)
-		}
-	}
-	return name, keep, opts, nil
-}
-
-var fieldCache = fields.NewCache(bqTagParser, nil, nil)
-
-var (
-	int64ParamType     = &bq.QueryParameterType{Type: "INT64"}
-	float64ParamType   = &bq.QueryParameterType{Type: "FLOAT64"}
-	boolParamType      = &bq.QueryParameterType{Type: "BOOL"}
-	stringParamType    = &bq.QueryParameterType{Type: "STRING"}
-	bytesParamType     = &bq.QueryParameterType{Type: "BYTES"}
-	dateParamType      = &bq.QueryParameterType{Type: "DATE"}
-	timeParamType      = &bq.QueryParameterType{Type: "TIME"}
-	dateTimeParamType  = &bq.QueryParameterType{Type: "DATETIME"}
-	timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
-)
-
-var (
-	typeOfDate     = reflect.TypeOf(civil.Date{})
-	typeOfTime     = reflect.TypeOf(civil.Time{})
-	typeOfDateTime = reflect.TypeOf(civil.DateTime{})
-	typeOfGoTime   = reflect.TypeOf(time.Time{})
-)
-
-// A QueryParameter is a parameter to a query.
-type QueryParameter struct {
-	// Name is used for named parameter mode.
-	// It must match the name in the query case-insensitively.
-	Name string
-
-	// Value is the value of the parameter.
-	//
-	// When you create a QueryParameter to send to BigQuery, the following Go types
-	// are supported, with their corresponding BigQuery types:
-	// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
-	//   Note that uint, uint64 and uintptr are not supported, because
-	//   they may contain values that cannot fit into a 64-bit signed integer.
-	// float32, float64: FLOAT64
-	// bool: BOOL
-	// string: STRING
-	// []byte: BYTES
-	// time.Time: TIMESTAMP
-	// Arrays and slices of the above.
-	// Structs of the above. Only the exported fields are used.
-	//
-	// When a QueryParameter is returned inside a QueryConfig from a call to
-	// Job.Config:
-	// Integers are of type int64.
-	// Floating-point values are of type float64.
-	// Arrays are of type []interface{}, regardless of the array element type.
-	// Structs are of type map[string]interface{}.
-	Value interface{}
-}
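
A minimal sketch of named-parameter mode (parameters imply standard SQL, which query.go later in this diff enforces); the table reference is a placeholder:

func namesAboveMin(ctx context.Context, client *bigquery.Client) (*bigquery.RowIterator, error) {
	q := client.Query("SELECT name FROM `my-project.mydataset.mytable` WHERE n > @min")
	q.Parameters = []bigquery.QueryParameter{
		{Name: "min", Value: 100}, // int maps to INT64, per the table above
	}
	return q.Read(ctx)
}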
-
-func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
-	pv, err := paramValue(reflect.ValueOf(p.Value))
-	if err != nil {
-		return nil, err
-	}
-	pt, err := paramType(reflect.TypeOf(p.Value))
-	if err != nil {
-		return nil, err
-	}
-	return &bq.QueryParameter{
-		Name:           p.Name,
-		ParameterValue: &pv,
-		ParameterType:  pt,
-	}, nil
-}
-
-func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
-	if t == nil {
-		return nil, errors.New("bigquery: nil parameter")
-	}
-	switch t {
-	case typeOfDate:
-		return dateParamType, nil
-	case typeOfTime:
-		return timeParamType, nil
-	case typeOfDateTime:
-		return dateTimeParamType, nil
-	case typeOfGoTime:
-		return timestampParamType, nil
-	}
-	switch t.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
-		return int64ParamType, nil
-
-	case reflect.Float32, reflect.Float64:
-		return float64ParamType, nil
-
-	case reflect.Bool:
-		return boolParamType, nil
-
-	case reflect.String:
-		return stringParamType, nil
-
-	case reflect.Slice:
-		if t.Elem().Kind() == reflect.Uint8 {
-			return bytesParamType, nil
-		}
-		fallthrough
-
-	case reflect.Array:
-		et, err := paramType(t.Elem())
-		if err != nil {
-			return nil, err
-		}
-		return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
-
-	case reflect.Ptr:
-		if t.Elem().Kind() != reflect.Struct {
-			break
-		}
-		t = t.Elem()
-		fallthrough
-
-	case reflect.Struct:
-		var fts []*bq.QueryParameterTypeStructTypes
-		fields, err := fieldCache.Fields(t)
-		if err != nil {
-			return nil, err
-		}
-		for _, f := range fields {
-			pt, err := paramType(f.Type)
-			if err != nil {
-				return nil, err
-			}
-			fts = append(fts, &bq.QueryParameterTypeStructTypes{
-				Name: f.Name,
-				Type: pt,
-			})
-		}
-		return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
-	}
-	return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
-}
-
-func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
-	var res bq.QueryParameterValue
-	if !v.IsValid() {
-		return res, errors.New("bigquery: nil parameter")
-	}
-	t := v.Type()
-	switch t {
-	case typeOfDate:
-		res.Value = v.Interface().(civil.Date).String()
-		return res, nil
-
-	case typeOfTime:
-		// civil.Time has nanosecond resolution, but BigQuery TIME has only microsecond resolution.
-		res.Value = CivilTimeString(v.Interface().(civil.Time))
-		return res, nil
-
-	case typeOfDateTime:
-		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
-		return res, nil
-
-	case typeOfGoTime:
-		res.Value = v.Interface().(time.Time).Format(timestampFormat)
-		return res, nil
-	}
-	switch t.Kind() {
-	case reflect.Slice:
-		if t.Elem().Kind() == reflect.Uint8 {
-			res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
-			return res, nil
-		}
-		fallthrough
-
-	case reflect.Array:
-		var vals []*bq.QueryParameterValue
-		for i := 0; i < v.Len(); i++ {
-			val, err := paramValue(v.Index(i))
-			if err != nil {
-				return bq.QueryParameterValue{}, err
-			}
-			vals = append(vals, &val)
-		}
-		return bq.QueryParameterValue{ArrayValues: vals}, nil
-
-	case reflect.Ptr:
-		if t.Elem().Kind() != reflect.Struct {
-			return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
-		}
-		t = t.Elem()
-		v = v.Elem()
-		if !v.IsValid() {
-			// nil pointer becomes empty value
-			return res, nil
-		}
-		fallthrough
-
-	case reflect.Struct:
-		fields, err := fieldCache.Fields(t)
-		if err != nil {
-			return bq.QueryParameterValue{}, err
-		}
-		res.StructValues = map[string]bq.QueryParameterValue{}
-		for _, f := range fields {
-			fv := v.FieldByIndex(f.Index)
-			fp, err := paramValue(fv)
-			if err != nil {
-				return bq.QueryParameterValue{}, err
-			}
-			res.StructValues[f.Name] = fp
-		}
-		return res, nil
-	}
-	// None of the above: assume a scalar type. (If it's not a valid type,
-	// paramType will catch the error.)
-	res.Value = fmt.Sprint(v.Interface())
-	return res, nil
-}
-
-func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
-	p := QueryParameter{Name: q.Name}
-	val, err := convertParamValue(q.ParameterValue, q.ParameterType)
-	if err != nil {
-		return QueryParameter{}, err
-	}
-	p.Value = val
-	return p, nil
-}
-
-var paramTypeToFieldType = map[string]FieldType{
-	int64ParamType.Type:   IntegerFieldType,
-	float64ParamType.Type: FloatFieldType,
-	boolParamType.Type:    BooleanFieldType,
-	stringParamType.Type:  StringFieldType,
-	bytesParamType.Type:   BytesFieldType,
-	dateParamType.Type:    DateFieldType,
-	timeParamType.Type:    TimeFieldType,
-}
-
-// convertParamValue converts a parameter value from the service to a Go value.
-// This is similar to, but not quite the same as, converting data values.
-func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
-	switch qtype.Type {
-	case "ARRAY":
-		if qval == nil {
-			return []interface{}(nil), nil
-		}
-		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
-	case "STRUCT":
-		if qval == nil {
-			return map[string]interface{}(nil), nil
-		}
-		return convertParamStruct(qval.StructValues, qtype.StructTypes)
-	case "TIMESTAMP":
-		return time.Parse(timestampFormat, qval.Value)
-	case "DATETIME":
-		parts := strings.Fields(qval.Value)
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
-		}
-		return civil.ParseDateTime(parts[0] + "T" + parts[1])
-	default:
-		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
-	}
-}
-
-// convertParamArray converts a query parameter array value to a Go value. It
-// always returns a []interface{}.
-func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
-	var vals []interface{}
-	for _, el := range elVals {
-		val, err := convertParamValue(el, elType)
-		if err != nil {
-			return nil, err
-		}
-		vals = append(vals, val)
-	}
-	return vals, nil
-}
-
-// convertParamStruct converts a query parameter struct value into a Go value. It
-// always returns a map[string]interface{}.
-func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
-	vals := map[string]interface{}{}
-	for _, st := range sTypes {
-		if sv, ok := sVals[st.Name]; ok {
-			val, err := convertParamValue(&sv, st.Type)
-			if err != nil {
-				return nil, err
-			}
-			vals[st.Name] = val
-		} else {
-			vals[st.Name] = nil
-		}
-	}
-	return vals, nil
-}

+ 0 - 284
vendor/cloud.google.com/go/bigquery/query.go

@@ -1,284 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// QueryConfig holds the configuration for a query job.
-type QueryConfig struct {
-	// Dst is the table into which the results of the query will be written.
-	// If this field is nil, a temporary table will be created.
-	Dst *Table
-
-	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
-	Q string
-
-	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
-	// If DefaultProjectID is set, DefaultDatasetID must also be set.
-	DefaultProjectID string
-	DefaultDatasetID string
-
-	// TableDefinitions describes data sources outside of BigQuery.
-	// The map keys may be used as table names in the query string.
-	//
-	// When a QueryConfig is returned from Job.Config, the map values
-	// are always of type *ExternalDataConfig.
-	TableDefinitions map[string]ExternalData
-
-	// CreateDisposition specifies the circumstances under which the destination table will be created.
-	// The default is CreateIfNeeded.
-	CreateDisposition TableCreateDisposition
-
-	// WriteDisposition specifies how existing data in the destination table is treated.
-	// The default is WriteEmpty.
-	WriteDisposition TableWriteDisposition
-
-	// DisableQueryCache prevents results from being fetched from the query cache.
-	// If this field is false, results are fetched from the cache if they are available.
-	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
-	// Cached results are only available when TableID is unspecified in the query's destination Table.
-	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
-	DisableQueryCache bool
-
-	// DisableFlattenedResults prevents results from being flattened.
-	// If this field is false, results from nested and repeated fields are flattened.
-	// DisableFlattenedResults implies AllowLargeResults.
-	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
-	DisableFlattenedResults bool
-
-	// AllowLargeResults allows the query to produce arbitrarily large result tables.
-	// The destination must be a table.
-	// When using this option, queries will take longer to execute, even if the result set is small.
-	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
-	AllowLargeResults bool
-
-	// Priority specifies the priority with which to schedule the query.
-	// The default priority is InteractivePriority.
-	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
-	Priority QueryPriority
-
-	// MaxBillingTier sets the maximum billing tier for a Query.
-	// Queries that have resource usage beyond this tier will fail (without
-	// incurring a charge). If this field is zero, the project default will be used.
-	MaxBillingTier int
-
-	// MaxBytesBilled limits the number of bytes billed for
-	// this job.  Queries that would exceed this limit will fail (without incurring
-	// a charge).
-	// If this field is less than 1, the project default will be
-	// used.
-	MaxBytesBilled int64
-
-	// UseStandardSQL causes the query to use standard SQL. This is the default.
-	// Deprecated: use UseLegacySQL.
-	UseStandardSQL bool
-
-	// UseLegacySQL causes the query to use legacy SQL.
-	UseLegacySQL bool
-
-	// Parameters is a list of query parameters. The presence of parameters
-	// implies the use of standard SQL.
-	// If the query uses positional syntax ("?"), then no parameter may have a name.
-	// If the query uses named syntax ("@p"), then all parameters must have names.
-	// It is illegal to mix positional and named syntax.
-	Parameters []QueryParameter
-
-	// The labels associated with this job.
-	Labels map[string]string
-
-	// If true, don't actually run this job. A valid query will return a mostly
-	// empty response with some processing statistics, while an invalid query will
-	// return the same error it would if it wasn't a dry run.
-	//
-	// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
-	// call LastStatus on the returned job to get statistics. Calling Status on a
-	// dry-run job will fail.
-	DryRun bool
-}
-
-func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
-	qconf := &bq.JobConfigurationQuery{
-		Query:              qc.Q,
-		CreateDisposition:  string(qc.CreateDisposition),
-		WriteDisposition:   string(qc.WriteDisposition),
-		AllowLargeResults:  qc.AllowLargeResults,
-		Priority:           string(qc.Priority),
-		MaximumBytesBilled: qc.MaxBytesBilled,
-	}
-	if len(qc.TableDefinitions) > 0 {
-		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
-	}
-	for name, data := range qc.TableDefinitions {
-		qconf.TableDefinitions[name] = data.toBQ()
-	}
-	if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
-		qconf.DefaultDataset = &bq.DatasetReference{
-			DatasetId: qc.DefaultDatasetID,
-			ProjectId: qc.DefaultProjectID,
-		}
-	}
-	if tier := int64(qc.MaxBillingTier); tier > 0 {
-		qconf.MaximumBillingTier = &tier
-	}
-	f := false
-	if qc.DisableQueryCache {
-		qconf.UseQueryCache = &f
-	}
-	if qc.DisableFlattenedResults {
-		qconf.FlattenResults = &f
-		// DisableFlattenedResults implies AllowLargeResults.
-		qconf.AllowLargeResults = true
-	}
-	if qc.UseStandardSQL && qc.UseLegacySQL {
-		return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
-	}
-	if len(qc.Parameters) > 0 && qc.UseLegacySQL {
-		return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
-	}
-	if qc.UseLegacySQL {
-		qconf.UseLegacySql = true
-	} else {
-		qconf.UseLegacySql = false
-		qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
-	}
-	if qc.Dst != nil && !qc.Dst.implicitTable() {
-		qconf.DestinationTable = qc.Dst.toBQ()
-	}
-	for _, p := range qc.Parameters {
-		qp, err := p.toBQ()
-		if err != nil {
-			return nil, err
-		}
-		qconf.QueryParameters = append(qconf.QueryParameters, qp)
-	}
-	return &bq.JobConfiguration{
-		Labels: qc.Labels,
-		DryRun: qc.DryRun,
-		Query:  qconf,
-	}, nil
-}
-
-func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
-	qq := q.Query
-	qc := &QueryConfig{
-		Labels:            q.Labels,
-		DryRun:            q.DryRun,
-		Q:                 qq.Query,
-		CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
-		WriteDisposition:  TableWriteDisposition(qq.WriteDisposition),
-		AllowLargeResults: qq.AllowLargeResults,
-		Priority:          QueryPriority(qq.Priority),
-		MaxBytesBilled:    qq.MaximumBytesBilled,
-		UseLegacySQL:      qq.UseLegacySql,
-		UseStandardSQL:    !qq.UseLegacySql,
-	}
-	if len(qq.TableDefinitions) > 0 {
-		qc.TableDefinitions = make(map[string]ExternalData)
-	}
-	for name, qedc := range qq.TableDefinitions {
-		edc, err := bqToExternalDataConfig(&qedc)
-		if err != nil {
-			return nil, err
-		}
-		qc.TableDefinitions[name] = edc
-	}
-	if qq.DefaultDataset != nil {
-		qc.DefaultProjectID = qq.DefaultDataset.ProjectId
-		qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
-	}
-	if qq.MaximumBillingTier != nil {
-		qc.MaxBillingTier = int(*qq.MaximumBillingTier)
-	}
-	if qq.UseQueryCache != nil && !*qq.UseQueryCache {
-		qc.DisableQueryCache = true
-	}
-	if qq.FlattenResults != nil && !*qq.FlattenResults {
-		qc.DisableFlattenedResults = true
-	}
-	if qq.DestinationTable != nil {
-		qc.Dst = bqToTable(qq.DestinationTable, c)
-	}
-	for _, qp := range qq.QueryParameters {
-		p, err := bqToQueryParameter(qp)
-		if err != nil {
-			return nil, err
-		}
-		qc.Parameters = append(qc.Parameters, p)
-	}
-	return qc, nil
-}
-
-// QueryPriority specifies a priority with which a query is to be executed.
-type QueryPriority string
-
-const (
-	BatchPriority       QueryPriority = "BATCH"
-	InteractivePriority QueryPriority = "INTERACTIVE"
-)
-
-// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
-type Query struct {
-	JobIDConfig
-	QueryConfig
-	client *Client
-}
-
-// Query creates a query with string q.
-// The returned Query may optionally be further configured before its Run method is called.
-func (c *Client) Query(q string) *Query {
-	return &Query{
-		client:      c,
-		QueryConfig: QueryConfig{Q: q},
-	}
-}
-
-// Run initiates a query job.
-func (q *Query) Run(ctx context.Context) (*Job, error) {
-	job, err := q.newJob()
-	if err != nil {
-		return nil, err
-	}
-	j, err := q.client.insertJob(ctx, job, nil)
-	if err != nil {
-		return nil, err
-	}
-	return j, nil
-}
-
-func (q *Query) newJob() (*bq.Job, error) {
-	config, err := q.QueryConfig.toBQ()
-	if err != nil {
-		return nil, err
-	}
-	return &bq.Job{
-		JobReference:  q.JobIDConfig.createJobRef(q.client.projectID),
-		Configuration: config,
-	}, nil
-}
-
-// Read submits a query for execution and returns the results via a RowIterator.
-// It is a shorthand for Query.Run followed by Job.Read.
-func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
-	job, err := q.Run(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return job.Read(ctx)
-}
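
A usage sketch tying Query.Read to row iteration; the []Value row type and the RowIterator.Next convention come from value.go and iterator.go elsewhere in this vendor tree:

func printRows(ctx context.Context, client *bigquery.Client) error {
	it, err := client.Query("SELECT 1 AS n, 'a' AS s").Read(ctx)
	if err != nil {
		return err
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(row)
	}
}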

+ 0 - 318
vendor/cloud.google.com/go/bigquery/schema.go

@@ -1,318 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-
-	"cloud.google.com/go/internal/atomiccache"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// Schema describes the fields in a table or query result.
-type Schema []*FieldSchema
-
-// FieldSchema describes a single field in a Schema.
-type FieldSchema struct {
-	// The field name.
-	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
-	// and must start with a letter or underscore.
-	// The maximum length is 128 characters.
-	Name string
-
-	// A description of the field. The maximum length is 16,384 characters.
-	Description string
-
-	// Whether the field may contain multiple values.
-	Repeated bool
-	// Whether the field is required.  Ignored if Repeated is true.
-	Required bool
-
-	// The field data type.  If Type is Record, then this field contains a nested schema,
-	// which is described by Schema.
-	Type FieldType
-	// Describes the nested schema if Type is set to Record.
-	Schema Schema
-}
-
-func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
-	tfs := &bq.TableFieldSchema{
-		Description: fs.Description,
-		Name:        fs.Name,
-		Type:        string(fs.Type),
-	}
-
-	if fs.Repeated {
-		tfs.Mode = "REPEATED"
-	} else if fs.Required {
-		tfs.Mode = "REQUIRED"
-	} // else leave as default, which is interpreted as NULLABLE.
-
-	for _, f := range fs.Schema {
-		tfs.Fields = append(tfs.Fields, f.toBQ())
-	}
-
-	return tfs
-}
-
-func (s Schema) toBQ() *bq.TableSchema {
-	var fields []*bq.TableFieldSchema
-	for _, f := range s {
-		fields = append(fields, f.toBQ())
-	}
-	return &bq.TableSchema{Fields: fields}
-}
-
-func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
-	fs := &FieldSchema{
-		Description: tfs.Description,
-		Name:        tfs.Name,
-		Repeated:    tfs.Mode == "REPEATED",
-		Required:    tfs.Mode == "REQUIRED",
-		Type:        FieldType(tfs.Type),
-	}
-
-	for _, f := range tfs.Fields {
-		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
-	}
-	return fs
-}
-
-func bqToSchema(ts *bq.TableSchema) Schema {
-	if ts == nil {
-		return nil
-	}
-	var s Schema
-	for _, f := range ts.Fields {
-		s = append(s, bqToFieldSchema(f))
-	}
-	return s
-}
-
-// FieldType is the type of a Schema field.
-type FieldType string
-
-const (
-	StringFieldType    FieldType = "STRING"
-	BytesFieldType     FieldType = "BYTES"
-	IntegerFieldType   FieldType = "INTEGER"
-	FloatFieldType     FieldType = "FLOAT"
-	BooleanFieldType   FieldType = "BOOLEAN"
-	TimestampFieldType FieldType = "TIMESTAMP"
-	RecordFieldType    FieldType = "RECORD"
-	DateFieldType      FieldType = "DATE"
-	TimeFieldType      FieldType = "TIME"
-	DateTimeFieldType  FieldType = "DATETIME"
-)
-
-var (
-	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
-	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
-	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
-)
-
-var typeOfByteSlice = reflect.TypeOf([]byte{})
-
-// InferSchema tries to derive a BigQuery schema from the supplied struct value.
-// NOTE: All fields in the returned Schema are configured to be required, unless
-// the corresponding field in the supplied struct is a slice or array, or is
-// tagged with the "nullable" option (see the struct tag handling in params.go).
-//
-// It is considered an error if the struct (including nested structs) contains
-// any exported fields that are pointers or one of the following types:
-// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
-// Future versions may handle these cases without error.
-//
-// Recursively defined structs are also disallowed.
-func InferSchema(st interface{}) (Schema, error) {
-	return inferSchemaReflectCached(reflect.TypeOf(st))
-}
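
A sketch of inference in action, including the "nullable" tag option parsed in params.go earlier in this diff:

type scoreRow struct {
	Name  string
	Score int `bigquery:"score,nullable"`
}

// Name becomes a REQUIRED STRING field; Score becomes a NULLABLE INTEGER
// field named "score".
func scoreSchema() (bigquery.Schema, error) {
	return bigquery.InferSchema(scoreRow{})
}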
-
-// TODO(jba): replace with sync.Map for Go 1.9.
-var schemaCache atomiccache.Cache
-
-type cacheVal struct {
-	schema Schema
-	err    error
-}
-
-func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
-	cv := schemaCache.Get(t, func() interface{} {
-		s, err := inferSchemaReflect(t)
-		return cacheVal{s, err}
-	}).(cacheVal)
-	return cv.schema, cv.err
-}
-
-func inferSchemaReflect(t reflect.Type) (Schema, error) {
-	rec, err := hasRecursiveType(t, nil)
-	if err != nil {
-		return nil, err
-	}
-	if rec {
-		return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
-	}
-	return inferStruct(t)
-}
-
-func inferStruct(t reflect.Type) (Schema, error) {
-	switch t.Kind() {
-	case reflect.Ptr:
-		if t.Elem().Kind() != reflect.Struct {
-			return nil, errNoStruct
-		}
-		t = t.Elem()
-		fallthrough
-
-	case reflect.Struct:
-		return inferFields(t)
-	default:
-		return nil, errNoStruct
-	}
-}
-
-// inferFieldSchema infers the FieldSchema for a Go type
-func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
-	switch rt {
-	case typeOfByteSlice:
-		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
-	case typeOfGoTime:
-		return &FieldSchema{Required: !nullable, Type: TimestampFieldType}, nil
-	case typeOfDate:
-		return &FieldSchema{Required: !nullable, Type: DateFieldType}, nil
-	case typeOfTime:
-		return &FieldSchema{Required: !nullable, Type: TimeFieldType}, nil
-	case typeOfDateTime:
-		return &FieldSchema{Required: !nullable, Type: DateTimeFieldType}, nil
-	}
-	if isSupportedIntType(rt) {
-		return &FieldSchema{Required: !nullable, Type: IntegerFieldType}, nil
-	}
-	switch rt.Kind() {
-	case reflect.Slice, reflect.Array:
-		et := rt.Elem()
-		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
-			// Multi-dimensional slices/arrays are not supported by BigQuery.
-			return nil, errUnsupportedFieldType
-		}
-
-		f, err := inferFieldSchema(et, false)
-		if err != nil {
-			return nil, err
-		}
-		f.Repeated = true
-		f.Required = false
-		return f, nil
-	case reflect.Struct, reflect.Ptr:
-		nested, err := inferStruct(rt)
-		if err != nil {
-			return nil, err
-		}
-		return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
-	case reflect.String:
-		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
-	case reflect.Bool:
-		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
-	case reflect.Float32, reflect.Float64:
-		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
-	default:
-		return nil, errUnsupportedFieldType
-	}
-}
-
-// inferFields extracts all exported field types from a struct type.
-func inferFields(rt reflect.Type) (Schema, error) {
-	var s Schema
-	fields, err := fieldCache.Fields(rt)
-	if err != nil {
-		return nil, err
-	}
-	for _, field := range fields {
-		var nullable bool
-		for _, opt := range field.ParsedTag.([]string) {
-			if opt == nullableTagOption {
-				nullable = true
-				break
-			}
-		}
-		f, err := inferFieldSchema(field.Type, nullable)
-		if err != nil {
-			return nil, err
-		}
-		f.Name = field.Name
-		s = append(s, f)
-	}
-	return s, nil
-}
-
-// isSupportedIntType reports whether t can be properly represented by the
-// BigQuery INTEGER/INT64 type.
-func isSupportedIntType(t reflect.Type) bool {
-	switch t.Kind() {
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
-		reflect.Uint8, reflect.Uint16, reflect.Uint32:
-		return true
-	default:
-		return false
-	}
-}
-
-// typeList is a linked list of reflect.Types.
-type typeList struct {
-	t    reflect.Type
-	next *typeList
-}
-
-func (l *typeList) has(t reflect.Type) bool {
-	for l != nil {
-		if l.t == t {
-			return true
-		}
-		l = l.next
-	}
-	return false
-}
-
-// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
-// via exported fields. (Schema inference ignores unexported fields.)
-func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
-	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Slice || t.Kind() == reflect.Array {
-		t = t.Elem()
-	}
-	if t.Kind() != reflect.Struct {
-		return false, nil
-	}
-	if seen.has(t) {
-		return true, nil
-	}
-	fields, err := fieldCache.Fields(t)
-	if err != nil {
-		return false, err
-	}
-	seen = &typeList{t, seen}
-	// Because seen is a linked list, additions to it from one field's
-	// recursive call will not affect the value for subsequent fields' calls.
-	for _, field := range fields {
-		ok, err := hasRecursiveType(field.Type, seen)
-		if err != nil {
-			return false, err
-		}
-		if ok {
-			return true, nil
-		}
-	}
-	return false, nil
-}

+ 0 - 487
vendor/cloud.google.com/go/bigquery/table.go

@@ -1,487 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	"golang.org/x/net/context"
-
-	"cloud.google.com/go/internal/optional"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// A Table is a reference to a BigQuery table.
-type Table struct {
-	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
-	// In this case the result will be stored in an ephemeral table.
-	ProjectID string
-	DatasetID string
-	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
-	// The maximum length is 1,024 characters.
-	TableID string
-
-	c *Client
-}
-
-// TableMetadata contains information about a BigQuery table.
-type TableMetadata struct {
-	// The following fields can be set when creating a table.
-
-	// The user-friendly name for the table.
-	Name string
-
-	// The user-friendly description of the table.
-	Description string
-
-	// The table schema. If provided on create, ViewQuery must be empty.
-	Schema Schema
-
-	// The query to use for a view. If provided on create, Schema must be nil.
-	ViewQuery string
-
-	// Use Legacy SQL for the view query.
-	// At most one of UseLegacySQL and UseStandardSQL can be true.
-	UseLegacySQL bool
-
-	// Use Standard SQL for the view query. This is the default.
-	// At most one of UseLegacySQL and UseStandardSQL can be true.
-	// Deprecated: use UseLegacySQL.
-	UseStandardSQL bool
-
-	// If non-nil, the table is partitioned by time.
-	TimePartitioning *TimePartitioning
-
-	// The time when this table expires. If not set, the table will persist
-	// indefinitely. Expired tables will be deleted and their storage reclaimed.
-	ExpirationTime time.Time
-
-	// User-provided labels.
-	Labels map[string]string
-
-	// Information about a table stored outside of BigQuery.
-	ExternalDataConfig *ExternalDataConfig
-
-	// All the fields below are read-only.
-
-	FullID           string // An opaque ID uniquely identifying the table.
-	Type             TableType
-	CreationTime     time.Time
-	LastModifiedTime time.Time
-
-	// The size of the table in bytes.
-	// This does not include data that is being buffered during a streaming insert.
-	NumBytes int64
-
-	// The number of rows of data in this table.
-	// This does not include data that is being buffered during a streaming insert.
-	NumRows uint64
-
-	// Contains information regarding this table's streaming buffer, if one is
-	// present. This field will be nil if the table is not being streamed to or if
-	// there is no data in the streaming buffer.
-	StreamingBuffer *StreamingBuffer
-
-	// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
-	// ensure that the metadata hasn't changed since it was read.
-	ETag string
-}
-
-// TableCreateDisposition specifies the circumstances under which the destination table will be created.
-// The default is CreateIfNeeded.
-type TableCreateDisposition string
-
-const (
-	// CreateIfNeeded will create the table if it does not already exist.
-	// Tables are created atomically on successful completion of a job.
-	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
-
-	// CreateNever ensures the table must already exist and will not be
-	// automatically created.
-	CreateNever TableCreateDisposition = "CREATE_NEVER"
-)
-
-// TableWriteDisposition specifies how existing data in a destination table is treated.
-// The default is WriteAppend.
-type TableWriteDisposition string
-
-const (
-	// WriteAppend will append to any existing data in the destination table.
-	// Data is appended atomically on successful completion of a job.
-	WriteAppend TableWriteDisposition = "WRITE_APPEND"
-
-	// WriteTruncate overrides the existing data in the destination table.
-	// Data is overwritten atomically on successful completion of a job.
-	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
-
-	// WriteEmpty fails writes if the destination table already contains data.
-	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
-)
-
-// TableType is the type of table.
-type TableType string
-
-const (
-	RegularTable  TableType = "TABLE"
-	ViewTable     TableType = "VIEW"
-	ExternalTable TableType = "EXTERNAL"
-)
-
-// TimePartitioning describes the time-based date partitioning on a table.
-// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
-type TimePartitioning struct {
-	// The amount of time to keep the storage for a partition.
-	// If the duration is zero, the data in the partitions does not expire.
-	Expiration time.Duration
-
-	// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
-	// table is partitioned by this field. The field must be a top-level TIMESTAMP or
-	// DATE field. Its mode must be NULLABLE or REQUIRED.
-	Field string
-}
-
-func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
-	if p == nil {
-		return nil
-	}
-	return &bq.TimePartitioning{
-		Type:         "DAY",
-		ExpirationMs: int64(p.Expiration / time.Millisecond),
-		Field:        p.Field,
-	}
-}
-
-func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
-	if q == nil {
-		return nil
-	}
-	return &TimePartitioning{
-		Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
-		Field:      q.Field,
-	}
-}
-
-// StreamingBuffer holds information about the streaming buffer.
-type StreamingBuffer struct {
-	// A lower-bound estimate of the number of bytes currently in the streaming
-	// buffer.
-	EstimatedBytes uint64
-
-	// A lower-bound estimate of the number of rows currently in the streaming
-	// buffer.
-	EstimatedRows uint64
-
-	// The time of the oldest entry in the streaming buffer.
-	OldestEntryTime time.Time
-}
-
-func (t *Table) toBQ() *bq.TableReference {
-	return &bq.TableReference{
-		ProjectId: t.ProjectID,
-		DatasetId: t.DatasetID,
-		TableId:   t.TableID,
-	}
-}
-
-// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
-func (t *Table) FullyQualifiedName() string {
-	return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
-}
-
-// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
-func (t *Table) implicitTable() bool {
-	return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
-}
-
-// Create creates a table in the BigQuery service.
-// Pass in a TableMetadata value to configure the table.
-// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
-// Expiration can only be set during table creation.
-// After creation, a view can be modified only if the table was initially
-// created as a view.
-func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
-	table, err := tm.toBQ()
-	if err != nil {
-		return err
-	}
-	table.TableReference = &bq.TableReference{
-		ProjectId: t.ProjectID,
-		DatasetId: t.DatasetID,
-		TableId:   t.TableID,
-	}
-	req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
-	setClientHeader(req.Header())
-	_, err = req.Do()
-	return err
-}
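
A sketch of Create with the metadata fields defined above; the IDs are placeholders, and the schema is assumed to come from InferSchema or be built by hand:

func createPartitionedTable(ctx context.Context, client *bigquery.Client, schema bigquery.Schema) error {
	t := client.Dataset("mydataset").Table("mytable")
	return t.Create(ctx, &bigquery.TableMetadata{
		Schema:           schema,
		ExpirationTime:   time.Now().Add(365 * 24 * time.Hour),
		TimePartitioning: &bigquery.TimePartitioning{Expiration: 7 * 24 * time.Hour},
	})
}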
-
-func (tm *TableMetadata) toBQ() (*bq.Table, error) {
-	t := &bq.Table{}
-	if tm == nil {
-		return t, nil
-	}
-	if tm.Schema != nil && tm.ViewQuery != "" {
-		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
-	}
-	t.FriendlyName = tm.Name
-	t.Description = tm.Description
-	t.Labels = tm.Labels
-	if tm.Schema != nil {
-		t.Schema = tm.Schema.toBQ()
-	}
-	if tm.ViewQuery != "" {
-		if tm.UseStandardSQL && tm.UseLegacySQL {
-			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
-		}
-		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
-		if tm.UseLegacySQL {
-			t.View.UseLegacySql = true
-		} else {
-			t.View.UseLegacySql = false
-			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
-		}
-	} else if tm.UseLegacySQL || tm.UseStandardSQL {
-		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
-	}
-	t.TimePartitioning = tm.TimePartitioning.toBQ()
-	if !tm.ExpirationTime.IsZero() {
-		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
-	}
-	if tm.ExternalDataConfig != nil {
-		edc := tm.ExternalDataConfig.toBQ()
-		t.ExternalDataConfiguration = &edc
-	}
-	if tm.FullID != "" {
-		return nil, errors.New("cannot set FullID on create")
-	}
-	if tm.Type != "" {
-		return nil, errors.New("cannot set Type on create")
-	}
-	if !tm.CreationTime.IsZero() {
-		return nil, errors.New("cannot set CreationTime on create")
-	}
-	if !tm.LastModifiedTime.IsZero() {
-		return nil, errors.New("cannot set LastModifiedTime on create")
-	}
-	if tm.NumBytes != 0 {
-		return nil, errors.New("cannot set NumBytes on create")
-	}
-	if tm.NumRows != 0 {
-		return nil, errors.New("cannot set NumRows on create")
-	}
-	if tm.StreamingBuffer != nil {
-		return nil, errors.New("cannot set StreamingBuffer on create")
-	}
-	if tm.ETag != "" {
-		return nil, errors.New("cannot set ETag on create")
-	}
-	return t, nil
-}
-
-// Metadata fetches the metadata for the table.
-func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
-	req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
-	setClientHeader(req.Header())
-	var table *bq.Table
-	err := runWithRetry(ctx, func() (err error) {
-		table, err = req.Do()
-		return err
-	})
-	if err != nil {
-		return nil, err
-	}
-	return bqToTableMetadata(table)
-}
-
-func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
-	md := &TableMetadata{
-		Description:      t.Description,
-		Name:             t.FriendlyName,
-		Type:             TableType(t.Type),
-		FullID:           t.Id,
-		Labels:           t.Labels,
-		NumBytes:         t.NumBytes,
-		NumRows:          t.NumRows,
-		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
-		CreationTime:     unixMillisToTime(t.CreationTime),
-		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
-		ETag:             t.Etag,
-	}
-	if t.Schema != nil {
-		md.Schema = bqToSchema(t.Schema)
-	}
-	if t.View != nil {
-		md.ViewQuery = t.View.Query
-		md.UseLegacySQL = t.View.UseLegacySql
-	}
-	md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
-	if t.StreamingBuffer != nil {
-		md.StreamingBuffer = &StreamingBuffer{
-			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
-			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
-			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
-		}
-	}
-	if t.ExternalDataConfiguration != nil {
-		edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
-		if err != nil {
-			return nil, err
-		}
-		md.ExternalDataConfig = edc
-	}
-	return md, nil
-}
-
-// Delete deletes the table.
-func (t *Table) Delete(ctx context.Context) error {
-	req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
-	setClientHeader(req.Header())
-	return req.Do()
-}
-
-// Read fetches the contents of the table.
-func (t *Table) Read(ctx context.Context) *RowIterator {
-	return t.read(ctx, fetchPage)
-}
-
-func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
-	return newRowIterator(ctx, t, pf)
-}
-
-// Update modifies specific Table metadata fields.
-func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
-	bqt := tm.toBQ()
-	call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
-	setClientHeader(call.Header())
-	if etag != "" {
-		call.Header().Set("If-Match", etag)
-	}
-	var res *bq.Table
-	if err := runWithRetry(ctx, func() (err error) {
-		res, err = call.Do()
-		return err
-	}); err != nil {
-		return nil, err
-	}
-	return bqToTableMetadata(res)
-}
-
-func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
-	t := &bq.Table{}
-	forceSend := func(field string) {
-		t.ForceSendFields = append(t.ForceSendFields, field)
-	}
-
-	if tm.Description != nil {
-		t.Description = optional.ToString(tm.Description)
-		forceSend("Description")
-	}
-	if tm.Name != nil {
-		t.FriendlyName = optional.ToString(tm.Name)
-		forceSend("FriendlyName")
-	}
-	if tm.Schema != nil {
-		t.Schema = tm.Schema.toBQ()
-		forceSend("Schema")
-	}
-	if !tm.ExpirationTime.IsZero() {
-		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
-		forceSend("ExpirationTime")
-	}
-	if tm.ViewQuery != nil {
-		t.View = &bq.ViewDefinition{
-			Query:           optional.ToString(tm.ViewQuery),
-			ForceSendFields: []string{"Query"},
-		}
-	}
-	if tm.UseLegacySQL != nil {
-		if t.View == nil {
-			t.View = &bq.ViewDefinition{}
-		}
-		t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
-		t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
-	}
-	labels, forces, nulls := tm.update()
-	t.Labels = labels
-	t.ForceSendFields = append(t.ForceSendFields, forces...)
-	t.NullFields = append(t.NullFields, nulls...)
-	return t
-}
-
-// TableMetadataToUpdate is used when updating a table's metadata.
-// Only non-nil fields will be updated.
-type TableMetadataToUpdate struct {
-	// The user-friendly description of this table.
-	Description optional.String
-
-	// The user-friendly name for this table.
-	Name optional.String
-
-	// The table's schema.
-	// When updating a schema, you can add columns but not remove them.
-	Schema Schema
-
-	// The time when this table expires.
-	ExpirationTime time.Time
-
-	// The query to use for a view.
-	ViewQuery optional.String
-
-	// Use Legacy SQL for the view query.
-	UseLegacySQL optional.Bool
-
-	labelUpdater
-}
-
-// labelUpdater contains common code for updating labels.
-type labelUpdater struct {
-	setLabels    map[string]string
-	deleteLabels map[string]bool
-}
-
-// SetLabel causes a label to be added or modified on a call to Update.
-func (u *labelUpdater) SetLabel(name, value string) {
-	if u.setLabels == nil {
-		u.setLabels = map[string]string{}
-	}
-	u.setLabels[name] = value
-}
-
-// DeleteLabel causes a label to be deleted on a call to Update.
-func (u *labelUpdater) DeleteLabel(name string) {
-	if u.deleteLabels == nil {
-		u.deleteLabels = map[string]bool{}
-	}
-	u.deleteLabels[name] = true
-}
-
-func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
-	if u.setLabels == nil && u.deleteLabels == nil {
-		return nil, nil, nil
-	}
-	labels = map[string]string{}
-	for k, v := range u.setLabels {
-		labels[k] = v
-	}
-	if len(labels) == 0 && len(u.deleteLabels) > 0 {
-		forces = []string{"Labels"}
-	}
-	for l := range u.deleteLabels {
-		nulls = append(nulls, "Labels."+l)
-	}
-	return labels, forces, nulls
-}
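
For reference, a minimal, hypothetical sketch of how the metadata-update API deleted above was driven from application code (project, dataset, and table names are invented). Only fields explicitly set on TableMetadataToUpdate are sent, and label changes staged with SetLabel/DeleteLabel travel in the same PATCH request:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	t := client.Dataset("my_dataset").Table("my_table")

	var tm bigquery.TableMetadataToUpdate
	tm.Description = "nightly rollup" // optional.String accepts a plain string
	tm.SetLabel("env", "prod")        // staged upsert, sent in Labels
	tm.DeleteLabel("tmp")             // staged delete, sent as a null field

	// An empty etag makes the update unconditional; passing a previously
	// fetched md.ETag turns it into a compare-and-swap via If-Match.
	md, err := t.Update(ctx, tm, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.Labels)
}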

+ 0 - 224
vendor/cloud.google.com/go/bigquery/uploader.go

@@ -1,224 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-
-	"golang.org/x/net/context"
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// An Uploader does streaming inserts into a BigQuery table.
-// It is safe for concurrent use.
-type Uploader struct {
-	t *Table
-
-	// SkipInvalidRows causes rows containing invalid data to be silently
-	// ignored. The default value is false, which causes the entire request to
-	// fail if there is an attempt to insert an invalid row.
-	SkipInvalidRows bool
-
-	// IgnoreUnknownValues causes values not matching the schema to be ignored.
-	// The default value is false, which causes records containing such values
-	// to be treated as invalid records.
-	IgnoreUnknownValues bool
-
-	// A TableTemplateSuffix allows Uploaders to create tables automatically.
-	//
-	// Experimental: this option is experimental and may be modified or removed in future versions,
-	// regardless of any other documented package stability guarantees.
-	//
-	// When you specify a suffix, the table you upload data to
-	// will be used as a template for creating a new table, with the same schema,
-	// called <table> + <suffix>.
-	//
-	// More information is available at
-	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
-	TableTemplateSuffix string
-}
-
-// Uploader returns an Uploader that can be used to append rows to t.
-// The returned Uploader may optionally be further configured before its Put method is called.
-func (t *Table) Uploader() *Uploader {
-	return &Uploader{t: t}
-}
-
-// Put uploads one or more rows to the BigQuery service.
-//
-// If src is ValueSaver, then its Save method is called to produce a row for uploading.
-//
-// If src is a struct or pointer to a struct, then a schema is inferred from it
-// and used to create a StructSaver. The InsertID of the StructSaver will be
-// empty.
-//
-// If src is a slice of ValueSavers, structs, or struct pointers, then each
-// element of the slice is treated as above, and multiple rows are uploaded.
-//
-// Put returns a PutMultiError if one or more rows failed to be uploaded.
-// The PutMultiError contains a RowInsertionError for each failed row.
-//
-// Put will retry on temporary errors (see
-// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
-// in duplicate rows if you do not use insert IDs. Also, if the error persists,
-// the call will run indefinitely. Pass a context with a timeout to prevent
-// hanging calls.
-func (u *Uploader) Put(ctx context.Context, src interface{}) error {
-	savers, err := valueSavers(src)
-	if err != nil {
-		return err
-	}
-	return u.putMulti(ctx, savers)
-}
-
-func valueSavers(src interface{}) ([]ValueSaver, error) {
-	saver, ok, err := toValueSaver(src)
-	if err != nil {
-		return nil, err
-	}
-	if ok {
-		return []ValueSaver{saver}, nil
-	}
-	srcVal := reflect.ValueOf(src)
-	if srcVal.Kind() != reflect.Slice {
-		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
-
-	}
-	var savers []ValueSaver
-	for i := 0; i < srcVal.Len(); i++ {
-		s := srcVal.Index(i).Interface()
-		saver, ok, err := toValueSaver(s)
-		if err != nil {
-			return nil, err
-		}
-		if !ok {
-			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
-		}
-		savers = append(savers, saver)
-	}
-	return savers, nil
-}
-
-// Make a ValueSaver from x, which must implement ValueSaver already
-// or be a struct or pointer to struct.
-func toValueSaver(x interface{}) (ValueSaver, bool, error) {
-	if _, ok := x.(StructSaver); ok {
-		return nil, false, errors.New("bigquery: use &StructSaver, not StructSaver")
-	}
-	var insertID string
-	// Handle StructSavers specially so we can infer the schema if necessary.
-	if ss, ok := x.(*StructSaver); ok && ss.Schema == nil {
-		x = ss.Struct
-		insertID = ss.InsertID
-		// Fall through so we can infer the schema.
-	}
-	if saver, ok := x.(ValueSaver); ok {
-		return saver, ok, nil
-	}
-	v := reflect.ValueOf(x)
-	// Support Put with []interface{}
-	if v.Kind() == reflect.Interface {
-		v = v.Elem()
-	}
-	if v.Kind() == reflect.Ptr {
-		v = v.Elem()
-	}
-	if v.Kind() != reflect.Struct {
-		return nil, false, nil
-	}
-	schema, err := inferSchemaReflectCached(v.Type())
-	if err != nil {
-		return nil, false, err
-	}
-	return &StructSaver{
-		Struct:   x,
-		InsertID: insertID,
-		Schema:   schema,
-	}, true, nil
-}
-
-func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
-	req, err := u.newInsertRequest(src)
-	if err != nil {
-		return err
-	}
-	if req == nil {
-		return nil
-	}
-	call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
-	call = call.Context(ctx)
-	setClientHeader(call.Header())
-	var res *bq.TableDataInsertAllResponse
-	err = runWithRetry(ctx, func() (err error) {
-		res, err = call.Do()
-		return err
-	})
-	if err != nil {
-		return err
-	}
-	return handleInsertErrors(res.InsertErrors, req.Rows)
-}
-
-func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
-	if savers == nil { // If there are no rows, do nothing.
-		return nil, nil
-	}
-	req := &bq.TableDataInsertAllRequest{
-		TemplateSuffix:      u.TableTemplateSuffix,
-		IgnoreUnknownValues: u.IgnoreUnknownValues,
-		SkipInvalidRows:     u.SkipInvalidRows,
-	}
-	for _, saver := range savers {
-		row, insertID, err := saver.Save()
-		if err != nil {
-			return nil, err
-		}
-		if insertID == "" {
-			insertID = randomIDFn()
-		}
-		m := make(map[string]bq.JsonValue)
-		for k, v := range row {
-			m[k] = bq.JsonValue(v)
-		}
-		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
-			InsertId: insertID,
-			Json:     m,
-		})
-	}
-	return req, nil
-}
-
-func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
-	if len(ierrs) == 0 {
-		return nil
-	}
-	var errs PutMultiError
-	for _, e := range ierrs {
-		if int(e.Index) >= len(rows) {
-			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
-		}
-		rie := RowInsertionError{
-			InsertID: rows[e.Index].InsertId,
-			RowIndex: int(e.Index),
-		}
-		for _, errp := range e.Errors {
-			rie.Errors = append(rie.Errors, bqToError(errp))
-		}
-		errs = append(errs, rie)
-	}
-	return errs
-}
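
A hedged usage sketch for the removed Uploader (all names invented). The schema for Score is inferred from the struct by toValueSaver, so no explicit Schema is needed, and per-row failures come back as a PutMultiError:

package main

import (
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

// Score's schema is inferred via reflection; fields map to columns.
type Score struct {
	Name  string
	Value int64
}

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	u := client.Dataset("my_dataset").Table("scores").Uploader()
	u.SkipInvalidRows = true // let valid rows land; failures are reported per row

	rows := []*Score{{Name: "alice", Value: 98}, {Name: "bob", Value: 87}}
	if err := u.Put(ctx, rows); err != nil {
		if pme, ok := err.(bigquery.PutMultiError); ok {
			for _, rie := range pme {
				log.Printf("row %d failed: %v", rie.RowIndex, rie.Errors)
			}
		} else {
			log.Fatal(err)
		}
	}
}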

+ 0 - 718
vendor/cloud.google.com/go/bigquery/value.go

@@ -1,718 +0,0 @@
-// Copyright 2015 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package bigquery
-
-import (
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"reflect"
-	"strconv"
-	"time"
-
-	"cloud.google.com/go/civil"
-
-	bq "google.golang.org/api/bigquery/v2"
-)
-
-// Value stores the contents of a single cell from a BigQuery result.
-type Value interface{}
-
-// ValueLoader stores a slice of Values representing a result row from a Read operation.
-// See RowIterator.Next for more information.
-type ValueLoader interface {
-	Load(v []Value, s Schema) error
-}
-
-// valueList converts a []Value to implement ValueLoader.
-type valueList []Value
-
-// Load stores a sequence of values in a valueList.
-// It resets the slice length to zero, then appends each value to it.
-func (vs *valueList) Load(v []Value, _ Schema) error {
-	*vs = append((*vs)[:0], v...)
-	return nil
-}
-
-// valueMap converts a map[string]Value to implement ValueLoader.
-type valueMap map[string]Value
-
-// Load stores a sequence of values in a valueMap.
-func (vm *valueMap) Load(v []Value, s Schema) error {
-	if *vm == nil {
-		*vm = map[string]Value{}
-	}
-	loadMap(*vm, v, s)
-	return nil
-}
-
-func loadMap(m map[string]Value, vals []Value, s Schema) {
-	for i, f := range s {
-		val := vals[i]
-		var v interface{}
-		switch {
-		case f.Schema == nil:
-			v = val
-		case !f.Repeated:
-			m2 := map[string]Value{}
-			loadMap(m2, val.([]Value), f.Schema)
-			v = m2
-		default: // repeated and nested
-			sval := val.([]Value)
-			vs := make([]Value, len(sval))
-			for j, e := range sval {
-				m2 := map[string]Value{}
-				loadMap(m2, e.([]Value), f.Schema)
-				vs[j] = m2
-			}
-			v = vs
-		}
-		m[f.Name] = v
-	}
-}
-
-type structLoader struct {
-	typ reflect.Type // type of struct
-	err error
-	ops []structLoaderOp
-
-	vstructp reflect.Value // pointer to current struct value; changed by set
-}
-
-// A setFunc is a function that sets a struct field or slice/array
-// element to a value.
-type setFunc func(v reflect.Value, val interface{}) error
-
-// A structLoaderOp instructs the loader to set a struct field to a row value.
-type structLoaderOp struct {
-	fieldIndex []int
-	valueIndex int
-	setFunc    setFunc
-	repeated   bool
-}
-
-var errNoNulls = errors.New("bigquery: NULL values cannot be read into structs")
-
-func setAny(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	v.Set(reflect.ValueOf(x))
-	return nil
-}
-
-func setInt(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	xx := x.(int64)
-	if v.OverflowInt(xx) {
-		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
-	}
-	v.SetInt(xx)
-	return nil
-}
-
-func setFloat(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	xx := x.(float64)
-	if v.OverflowFloat(xx) {
-		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
-	}
-	v.SetFloat(xx)
-	return nil
-}
-
-func setBool(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	v.SetBool(x.(bool))
-	return nil
-}
-
-func setString(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	v.SetString(x.(string))
-	return nil
-}
-
-func setBytes(v reflect.Value, x interface{}) error {
-	if x == nil {
-		return errNoNulls
-	}
-	v.SetBytes(x.([]byte))
-	return nil
-}
-
-// set remembers a value for the next call to Load. The value must be
-// a pointer to a struct. (This is checked in RowIterator.Next.)
-func (sl *structLoader) set(structp interface{}, schema Schema) error {
-	if sl.err != nil {
-		return sl.err
-	}
-	sl.vstructp = reflect.ValueOf(structp)
-	typ := sl.vstructp.Type().Elem()
-	if sl.typ == nil {
-		// First call: remember the type and compile the schema.
-		sl.typ = typ
-		ops, err := compileToOps(typ, schema)
-		if err != nil {
-			sl.err = err
-			return err
-		}
-		sl.ops = ops
-	} else if sl.typ != typ {
-		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
-	}
-	return nil
-}
-
-// compileToOps produces a sequence of operations that will set the fields of a
-// value of structType to the contents of a row with schema.
-func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
-	var ops []structLoaderOp
-	fields, err := fieldCache.Fields(structType)
-	if err != nil {
-		return nil, err
-	}
-	for i, schemaField := range schema {
-		// Look for an exported struct field with the same name as the schema
-		// field, ignoring case (BigQuery column names are case-insensitive,
-		// and we want to act like encoding/json anyway).
-		structField := fields.Match(schemaField.Name)
-		if structField == nil {
-			// Ignore schema fields with no corresponding struct field.
-			continue
-		}
-		op := structLoaderOp{
-			fieldIndex: structField.Index,
-			valueIndex: i,
-		}
-		t := structField.Type
-		if schemaField.Repeated {
-			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
-				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
-					schemaField.Name, structField.Name, t)
-			}
-			t = t.Elem()
-			op.repeated = true
-		}
-		if schemaField.Type == RecordFieldType {
-			// Field can be a struct or a pointer to a struct.
-			if t.Kind() == reflect.Ptr {
-				t = t.Elem()
-			}
-			if t.Kind() != reflect.Struct {
-				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
-					structField.Name, structField.Type)
-			}
-			nested, err := compileToOps(t, schemaField.Schema)
-			if err != nil {
-				return nil, err
-			}
-			op.setFunc = func(v reflect.Value, val interface{}) error {
-				return setNested(nested, v, val.([]Value))
-			}
-		} else {
-			op.setFunc = determineSetFunc(t, schemaField.Type)
-			if op.setFunc == nil {
-				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
-					schemaField.Name, schemaField.Type, structField.Name, t)
-			}
-		}
-		ops = append(ops, op)
-	}
-	return ops, nil
-}
-
-// determineSetFunc chooses the best function for setting a field of type ftype
-// to a value whose schema field type is stype. It returns nil if stype
-// is not assignable to ftype.
-// determineSetFunc considers only basic types. See compileToOps for
-// handling of repetition and nesting.
-func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
-	switch stype {
-	case StringFieldType:
-		if ftype.Kind() == reflect.String {
-			return setString
-		}
-
-	case BytesFieldType:
-		if ftype == typeOfByteSlice {
-			return setBytes
-		}
-
-	case IntegerFieldType:
-		if isSupportedIntType(ftype) {
-			return setInt
-		}
-
-	case FloatFieldType:
-		switch ftype.Kind() {
-		case reflect.Float32, reflect.Float64:
-			return setFloat
-		}
-
-	case BooleanFieldType:
-		if ftype.Kind() == reflect.Bool {
-			return setBool
-		}
-
-	case TimestampFieldType:
-		if ftype == typeOfGoTime {
-			return setAny
-		}
-
-	case DateFieldType:
-		if ftype == typeOfDate {
-			return setAny
-		}
-
-	case TimeFieldType:
-		if ftype == typeOfTime {
-			return setAny
-		}
-
-	case DateTimeFieldType:
-		if ftype == typeOfDateTime {
-			return setAny
-		}
-	}
-	return nil
-}
-
-func (sl *structLoader) Load(values []Value, _ Schema) error {
-	if sl.err != nil {
-		return sl.err
-	}
-	return runOps(sl.ops, sl.vstructp.Elem(), values)
-}
-
-// runOps executes a sequence of ops, setting the fields of vstruct to the
-// supplied values.
-func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
-	for _, op := range ops {
-		field := vstruct.FieldByIndex(op.fieldIndex)
-		var err error
-		if op.repeated {
-			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
-		} else {
-			err = op.setFunc(field, values[op.valueIndex])
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
-	// v is either a struct or a pointer to a struct.
-	if v.Kind() == reflect.Ptr {
-		// If the pointer is nil, set it to a zero struct value.
-		if v.IsNil() {
-			v.Set(reflect.New(v.Type().Elem()))
-		}
-		v = v.Elem()
-	}
-	return runOps(ops, v, vals)
-}
-
-func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
-	vlen := len(vslice)
-	var flen int
-	switch field.Type().Kind() {
-	case reflect.Slice:
-		// Make a slice of the right size, avoiding allocation if possible.
-		switch {
-		case field.Len() < vlen:
-			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
-		case field.Len() > vlen:
-			field.SetLen(vlen)
-		}
-		flen = vlen
-
-	case reflect.Array:
-		flen = field.Len()
-		if flen > vlen {
-			// Set extra elements to their zero value.
-			z := reflect.Zero(field.Type().Elem())
-			for i := vlen; i < flen; i++ {
-				field.Index(i).Set(z)
-			}
-		}
-	default:
-		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
-	}
-	for i, val := range vslice {
-		if i < flen { // avoid writing past the end of a short array
-			if err := setElem(field.Index(i), val); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// A ValueSaver returns a row of data to be inserted into a table.
-type ValueSaver interface {
-	// Save returns a row to be inserted into a BigQuery table, represented
-	// as a map from field name to Value.
-	// If insertID is non-empty, BigQuery will use it to de-duplicate
-	// insertions of this row on a best-effort basis.
-	Save() (row map[string]Value, insertID string, err error)
-}
-
-// ValuesSaver implements ValueSaver for a slice of Values.
-type ValuesSaver struct {
-	Schema Schema
-
-	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
-	// of this row on a best-effort basis.
-	InsertID string
-
-	Row []Value
-}
-
-// Save implements ValueSaver.
-func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
-	m, err := valuesToMap(vls.Row, vls.Schema)
-	return m, vls.InsertID, err
-}
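
As an illustration of the Save contract just above (values and schema supplied by the caller; names are made up), saving a two-column row is pure computation and needs no service connection:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
)

func main() {
	saver := &bigquery.ValuesSaver{
		Schema: bigquery.Schema{
			{Name: "name", Type: bigquery.StringFieldType},
			{Name: "score", Type: bigquery.IntegerFieldType},
		},
		InsertID: "row-1", // best-effort de-duplication key
		Row:      []bigquery.Value{"alice", int64(98)},
	}
	row, id, err := saver.Save()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id, row) // e.g. row-1 map[name:alice score:98]
}
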
-
-func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
-	if len(vs) != len(schema) {
-		return nil, errors.New("Schema does not match length of row to be inserted")
-	}
-
-	m := make(map[string]Value)
-	for i, fieldSchema := range schema {
-		if fieldSchema.Type != RecordFieldType {
-			m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
-			continue
-		}
-		// Nested record, possibly repeated.
-		vals, ok := vs[i].([]Value)
-		if !ok {
-			return nil, errors.New("nested record is not a []Value")
-		}
-		if !fieldSchema.Repeated {
-			value, err := valuesToMap(vals, fieldSchema.Schema)
-			if err != nil {
-				return nil, err
-			}
-			m[fieldSchema.Name] = value
-			continue
-		}
-		// A repeated nested field is converted into a slice of maps.
-		var maps []Value
-		for _, v := range vals {
-			sv, ok := v.([]Value)
-			if !ok {
-				return nil, errors.New("nested record in slice is not a []Value")
-			}
-			value, err := valuesToMap(sv, fieldSchema.Schema)
-			if err != nil {
-				return nil, err
-			}
-			maps = append(maps, value)
-		}
-		m[fieldSchema.Name] = maps
-	}
-	return m, nil
-}
-
-// StructSaver implements ValueSaver for a struct.
-// The struct is converted to a map of values by using the values of struct
-// fields corresponding to schema fields. Additional and missing
-// fields are ignored, as are nested struct pointers that are nil.
-type StructSaver struct {
-	// Schema determines what fields of the struct are uploaded. It should
-	// match the table's schema.
-	Schema Schema
-
-	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
-	// of this row on a best-effort basis.
-	InsertID string
-
-	// Struct should be a struct or a pointer to a struct.
-	Struct interface{}
-}
-
-// Save implements ValueSaver.
-func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
-	vstruct := reflect.ValueOf(ss.Struct)
-	row, err = structToMap(vstruct, ss.Schema)
-	if err != nil {
-		return nil, "", err
-	}
-	return row, ss.InsertID, nil
-}
-
-func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
-	if vstruct.Kind() == reflect.Ptr {
-		vstruct = vstruct.Elem()
-	}
-	if !vstruct.IsValid() {
-		return nil, nil
-	}
-	m := map[string]Value{}
-	if vstruct.Kind() != reflect.Struct {
-		return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
-	}
-	fields, err := fieldCache.Fields(vstruct.Type())
-	if err != nil {
-		return nil, err
-	}
-	for _, schemaField := range schema {
-		// Look for an exported struct field with the same name as the schema
-		// field, ignoring case.
-		structField := fields.Match(schemaField.Name)
-		if structField == nil {
-			continue
-		}
-		val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
-		if err != nil {
-			return nil, err
-		}
-		// Add the value to the map, unless it is nil.
-		if val != nil {
-			m[schemaField.Name] = val
-		}
-	}
-	return m, nil
-}
-
-// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
-// the schemaField as a guide.
-// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
-// caller can easily identify a nil value.
-func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
-	if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
-		return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
-			schemaField.Name, vfield.Type())
-	}
-
-	// A non-nested field can be represented by its Go value, except for civil times.
-	if schemaField.Type != RecordFieldType {
-		return toUploadValueReflect(vfield, schemaField), nil
-	}
-	// A non-repeated nested field is converted into a map[string]Value.
-	if !schemaField.Repeated {
-		m, err := structToMap(vfield, schemaField.Schema)
-		if err != nil {
-			return nil, err
-		}
-		if m == nil {
-			return nil, nil
-		}
-		return m, nil
-	}
-	// A repeated nested field is converted into a slice of maps.
-	if vfield.Len() == 0 {
-		return nil, nil
-	}
-	var vals []Value
-	for i := 0; i < vfield.Len(); i++ {
-		m, err := structToMap(vfield.Index(i), schemaField.Schema)
-		if err != nil {
-			return nil, err
-		}
-		vals = append(vals, m)
-	}
-	return vals, nil
-}
-
-func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
-	if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType {
-		return toUploadValueReflect(reflect.ValueOf(val), fs)
-	}
-	return val
-}
-
-func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
-	switch fs.Type {
-	case TimeFieldType:
-		return civilToUploadValue(v, fs, func(v reflect.Value) string {
-			return CivilTimeString(v.Interface().(civil.Time))
-		})
-	case DateTimeFieldType:
-		return civilToUploadValue(v, fs, func(v reflect.Value) string {
-			return CivilDateTimeString(v.Interface().(civil.DateTime))
-		})
-	default:
-		if !fs.Repeated || v.Len() > 0 {
-			return v.Interface()
-		}
-		// The service treats a null repeated field as an error. Return
-		// nil to omit the field entirely.
-		return nil
-	}
-}
-
-func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
-	if !fs.Repeated {
-		return cvt(v)
-	}
-	if v.Len() == 0 {
-		return nil
-	}
-	s := make([]string, v.Len())
-	for i := 0; i < v.Len(); i++ {
-		s[i] = cvt(v.Index(i))
-	}
-	return s
-}
-
-// CivilTimeString returns a string representing a civil.Time in a format compatible
-// with BigQuery SQL. If the time has a non-zero sub-second component, it is rounded
-// to the nearest microsecond and rendered with six digits of sub-second precision.
-//
-// Use CivilTimeString when using civil.Time in DML, for example in INSERT
-// statements.
-func CivilTimeString(t civil.Time) string {
-	if t.Nanosecond == 0 {
-		return t.String()
-	}
-	micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
-	if micro == 1e6 {
-		micro = 999999 // clamp: rounding up must not produce a seven-digit fraction
-	}
-	t.Nanosecond = 0
-	return t.String() + fmt.Sprintf(".%06d", micro)
-}
-
-// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
-// with BigQuery SQL. It separates the date and time with a space, and formats the time
-// with CivilTimeString.
-//
-// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
-// statements.
-func CivilDateTimeString(dt civil.DateTime) string {
-	return dt.Date.String() + " " + CivilTimeString(dt.Time)
-}
-
-// convertRows converts a series of TableRows into a series of Value slices.
-// schema is used to interpret the data from rows; its length must match the
-// length of each row.
-func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
-	var rs [][]Value
-	for _, r := range rows {
-		row, err := convertRow(r, schema)
-		if err != nil {
-			return nil, err
-		}
-		rs = append(rs, row)
-	}
-	return rs, nil
-}
-
-func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
-	if len(schema) != len(r.F) {
-		return nil, errors.New("schema length does not match row length")
-	}
-	var values []Value
-	for i, cell := range r.F {
-		fs := schema[i]
-		v, err := convertValue(cell.V, fs.Type, fs.Schema)
-		if err != nil {
-			return nil, err
-		}
-		values = append(values, v)
-	}
-	return values, nil
-}
-
-func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
-	switch val := val.(type) {
-	case nil:
-		return nil, nil
-	case []interface{}:
-		return convertRepeatedRecord(val, typ, schema)
-	case map[string]interface{}:
-		return convertNestedRecord(val, schema)
-	case string:
-		return convertBasicType(val, typ)
-	default:
-		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
-	}
-}
-
-func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
-	var values []Value
-	for _, cell := range vals {
-		// each cell contains a single entry, keyed by "v"
-		val := cell.(map[string]interface{})["v"]
-		v, err := convertValue(val, typ, schema)
-		if err != nil {
-			return nil, err
-		}
-		values = append(values, v)
-	}
-	return values, nil
-}
-
-func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
-	// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.
-
-	// Nested records are wrapped in a map with a single key, "f".
-	record := val["f"].([]interface{})
-	if len(record) != len(schema) {
-		return nil, errors.New("schema length does not match record length")
-	}
-
-	var values []Value
-	for i, cell := range record {
-		// each cell contains a single entry, keyed by "v"
-		val := cell.(map[string]interface{})["v"]
-		fs := schema[i]
-		v, err := convertValue(val, fs.Type, fs.Schema)
-		if err != nil {
-			return nil, err
-		}
-		values = append(values, v)
-	}
-	return values, nil
-}
-
-// convertBasicType returns val as an interface with a concrete type specified by typ.
-func convertBasicType(val string, typ FieldType) (Value, error) {
-	switch typ {
-	case StringFieldType:
-		return val, nil
-	case BytesFieldType:
-		return base64.StdEncoding.DecodeString(val)
-	case IntegerFieldType:
-		return strconv.ParseInt(val, 10, 64)
-	case FloatFieldType:
-		return strconv.ParseFloat(val, 64)
-	case BooleanFieldType:
-		return strconv.ParseBool(val)
-	case TimestampFieldType:
-		f, err := strconv.ParseFloat(val, 64)
-		// Note: the round trip through float64 can lose sub-microsecond
-		// precision for present-day epochs, while BigQuery timestamps
-		// carry microsecond resolution.
-		return Value(time.Unix(0, int64(f*1e9)).UTC()), err
-	case DateFieldType:
-		return civil.ParseDate(val)
-	case TimeFieldType:
-		return civil.ParseTime(val)
-	case DateTimeFieldType:
-		return civil.ParseDateTime(val)
-	default:
-		return nil, fmt.Errorf("unrecognized type: %s", typ)
-	}
-}
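
To make the civil-time formatting above concrete, a small sketch (pure computation, no service calls; the civil package is also deleted in this commit) of the strings these helpers produce:

package main

import (
	"fmt"

	"cloud.google.com/go/bigquery"
	"cloud.google.com/go/civil"
)

func main() {
	t := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6500}
	// 6500ns rounds to 7µs and is printed with six fractional digits.
	fmt.Println(bigquery.CivilTimeString(t)) // 12:30:00.000007

	dt := civil.DateTime{Date: civil.Date{Year: 2017, Month: 10, Day: 1}, Time: t}
	fmt.Println(bigquery.CivilDateTimeString(dt)) // 2017-10-01 12:30:00.000007
}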

+ 0 - 423
vendor/cloud.google.com/go/bigtable/admin.go

@@ -1,423 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package bigtable
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-
-	btopt "cloud.google.com/go/bigtable/internal/option"
-	"cloud.google.com/go/longrunning"
-	lroauto "cloud.google.com/go/longrunning/autogen"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	gtransport "google.golang.org/api/transport/grpc"
-	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/status"
-	"google.golang.org/grpc/codes"
-)
-
-const adminAddr = "bigtableadmin.googleapis.com:443"
-
-// AdminClient is a client type for performing admin operations within a specific instance.
-type AdminClient struct {
-	conn    *grpc.ClientConn
-	tClient btapb.BigtableTableAdminClient
-
-	project, instance string
-
-	// Metadata to be sent with each request.
-	md metadata.MD
-}
-
-// NewAdminClient creates a new AdminClient for a given project and instance.
-func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
-	o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
-	if err != nil {
-		return nil, err
-	}
-	o = append(o, opts...)
-	conn, err := gtransport.Dial(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-	return &AdminClient{
-		conn:     conn,
-		tClient:  btapb.NewBigtableTableAdminClient(conn),
-		project:  project,
-		instance: instance,
-		md:       metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
-	}, nil
-}
-
-// Close closes the AdminClient.
-func (ac *AdminClient) Close() error {
-	return ac.conn.Close()
-}
-
-func (ac *AdminClient) instancePrefix() string {
-	return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
-}
-
-// Tables returns a list of the tables in the instance.
-func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.ListTablesRequest{
-		Parent: prefix,
-	}
-	res, err := ac.tClient.ListTables(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-	names := make([]string, 0, len(res.Tables))
-	for _, tbl := range res.Tables {
-		names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
-	}
-	return names, nil
-}
-
-// TableConf contains all of the information necessary to create a table with column families.
-type TableConf struct {
-	TableID   string
-	SplitKeys []string
-	// Families is a map from family name to GCPolicy
-	Families map[string]GCPolicy
-}
-
-// CreateTable creates a new table in the instance.
-// This method may return before the table's creation is complete.
-func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
-	return ac.CreateTableFromConf(ctx, &TableConf{TableID: table})
-}
-
-// CreatePresplitTable creates a new table in the instance.
-// The list of row keys will be used to initially split the table into multiple tablets.
-// Given two split keys, "s1" and "s2", three tablets will be created,
-// spanning the key ranges: [, s1), [s1, s2), [s2, ).
-// This method may return before the table's creation is complete.
-func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error {
-	return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys})
-}
-
-// CreateTableFromConf creates a new table in the instance from the given configuration.
-func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	var reqSplits []*btapb.CreateTableRequest_Split
-	for _, split := range conf.SplitKeys {
-		reqSplits = append(reqSplits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
-	}
-	var tbl btapb.Table
-	if conf.Families != nil {
-		tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
-		for fam, policy := range conf.Families {
-			tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{GcRule: policy.proto()}
-		}
-	}
-	prefix := ac.instancePrefix()
-	req := &btapb.CreateTableRequest{
-		Parent:        prefix,
-		TableId:       conf.TableID,
-		Table:         &tbl,
-		InitialSplits: reqSplits,
-	}
-	_, err := ac.tClient.CreateTable(ctx, req)
-	return err
-}
-
-// CreateColumnFamily creates a new column family in a table.
-func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
-	// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.ModifyColumnFamiliesRequest{
-		Name: prefix + "/tables/" + table,
-		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
-			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
-		}},
-	}
-	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
-	return err
-}
-
-// DeleteTable deletes a table and all of its data.
-func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.DeleteTableRequest{
-		Name: prefix + "/tables/" + table,
-	}
-	_, err := ac.tClient.DeleteTable(ctx, req)
-	return err
-}
-
-// DeleteColumnFamily deletes a column family in a table and all of its data.
-func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.ModifyColumnFamiliesRequest{
-		Name: prefix + "/tables/" + table,
-		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
-			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true},
-		}},
-	}
-	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
-	return err
-}
-
-// TableInfo represents information about a table.
-type TableInfo struct {
-	// Deprecated: use FamilyInfos instead.
-	Families    []string
-	FamilyInfos []FamilyInfo
-}
-
-// FamilyInfo represents information about a column family.
-type FamilyInfo struct {
-	Name     string
-	GCPolicy string
-}
-
-// TableInfo retrieves information about a table.
-func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.GetTableRequest{
-		Name: prefix + "/tables/" + table,
-	}
-	res, err := ac.tClient.GetTable(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-	ti := &TableInfo{}
-	for name, fam := range res.ColumnFamilies {
-		ti.Families = append(ti.Families, name)
-		ti.FamilyInfos = append(ti.FamilyInfos, FamilyInfo{Name: name, GCPolicy: GCRuleToString(fam.GcRule)})
-	}
-	return ti, nil
-}
-
-// SetGCPolicy specifies which cells in a column family should be garbage collected.
-// GC executes opportunistically in the background; table reads may return data
-// matching the GC policy.
-func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.ModifyColumnFamiliesRequest{
-		Name: prefix + "/tables/" + table,
-		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
-			Id:  family,
-			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
-		}},
-	}
-	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
-	return err
-}
-
-// DropRowRange permanently deletes a row range from the specified table.
-func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
-	ctx = mergeOutgoingMetadata(ctx, ac.md)
-	prefix := ac.instancePrefix()
-	req := &btapb.DropRowRangeRequest{
-		Name:   prefix + "/tables/" + table,
-		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte(rowKeyPrefix)},
-	}
-	_, err := ac.tClient.DropRowRange(ctx, req)
-	return err
-}
-
-const instanceAdminAddr = "bigtableadmin.googleapis.com:443"
-
-// InstanceAdminClient is a client type for performing admin operations on instances.
-// These operations can be substantially more dangerous than those provided by AdminClient.
-type InstanceAdminClient struct {
-	conn      *grpc.ClientConn
-	iClient   btapb.BigtableInstanceAdminClient
-	lroClient *lroauto.OperationsClient
-
-	project string
-
-	// Metadata to be sent with each request.
-	md metadata.MD
-}
-
-// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
-func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
-	o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
-	if err != nil {
-		return nil, err
-	}
-	o = append(o, opts...)
-	conn, err := gtransport.Dial(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-
-	lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
-	if err != nil {
-		// This error "should not happen", since we are just reusing old connection
-		// and never actually need to dial.
-		// If this does happen, we could leak conn. However, we cannot close conn:
-		// If the user invoked the function with option.WithGRPCConn,
-		// we would close a connection that's still in use.
-		// TODO(pongad): investigate error conditions.
-		return nil, err
-	}
-
-	return &InstanceAdminClient{
-		conn:      conn,
-		iClient:   btapb.NewBigtableInstanceAdminClient(conn),
-		lroClient: lroClient,
-
-		project: project,
-		md:      metadata.Pairs(resourcePrefixHeader, "projects/"+project),
-	}, nil
-}
-
-// Close closes the InstanceAdminClient.
-func (iac *InstanceAdminClient) Close() error {
-	return iac.conn.Close()
-}
-
-// StorageType is the type of storage used for all tables in an instance
-type StorageType int
-
-const (
-	SSD StorageType = iota // solid-state storage (the default)
-	HDD                    // magnetic storage
-)
-
-func (st StorageType) proto() btapb.StorageType {
-	if st == HDD {
-		return btapb.StorageType_HDD
-	}
-	return btapb.StorageType_SSD
-}
-
-// InstanceType is the type of the instance
-type InstanceType int32
-
-const (
-	PRODUCTION  InstanceType = InstanceType(btapb.Instance_PRODUCTION)
-	DEVELOPMENT              = InstanceType(btapb.Instance_DEVELOPMENT)
-)
-
-// InstanceInfo represents information about an instance
-type InstanceInfo struct {
-	Name        string // name of the instance
-	DisplayName string // display name for UIs
-}
-
-// InstanceConf contains the information necessary to create an Instance
-type InstanceConf struct {
-	InstanceId, DisplayName, ClusterId, Zone string
-	// NumNodes must not be specified for DEVELOPMENT instance types
-	NumNodes     int32
-	StorageType  StorageType
-	InstanceType InstanceType
-}
-
-var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
-
-// CreateInstance creates a new instance in the project.
-// This method will return when the instance has been created or when an error occurs.
-func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
-	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.CreateInstanceRequest{
-		Parent:     "projects/" + iac.project,
-		InstanceId: conf.InstanceId,
-		Instance:   &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
-		Clusters: map[string]*btapb.Cluster{
-			conf.ClusterId: {
-				ServeNodes:         conf.NumNodes,
-				DefaultStorageType: conf.StorageType.proto(),
-				Location:           "projects/" + iac.project + "/locations/" + conf.Zone,
-			},
-		},
-	}
-
-	lro, err := iac.iClient.CreateInstance(ctx, req)
-	if err != nil {
-		return err
-	}
-	resp := btapb.Instance{}
-	return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
-}
-
-// DeleteInstance deletes an instance from the project.
-func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
-	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
-	_, err := iac.iClient.DeleteInstance(ctx, req)
-	return err
-}
-
-// Instances returns a list of instances in the project.
-func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
-	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.ListInstancesRequest{
-		Parent: "projects/" + iac.project,
-	}
-	res, err := iac.iClient.ListInstances(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-	if len(res.FailedLocations) > 0 {
-		// We don't have a good way to return a partial result in the face of some zones being unavailable.
-		// Fail the entire request.
-		return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations)
-	}
-
-	var is []*InstanceInfo
-	for _, i := range res.Instances {
-		m := instanceNameRegexp.FindStringSubmatch(i.Name)
-		if m == nil {
-			return nil, fmt.Errorf("malformed instance name %q", i.Name)
-		}
-		is = append(is, &InstanceInfo{
-			Name:        m[2],
-			DisplayName: i.DisplayName,
-		})
-	}
-	return is, nil
-}
-
-// InstanceInfo returns information about an instance.
-func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
-	ctx = mergeOutgoingMetadata(ctx, iac.md)
-	req := &btapb.GetInstanceRequest{
-		Name: "projects/" + iac.project + "/instances/" + instanceId,
-	}
-	res, err := iac.iClient.GetInstance(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	m := instanceNameRegexp.FindStringSubmatch(res.Name)
-	if m == nil {
-		return nil, fmt.Errorf("malformed instance name %q", res.Name)
-	}
-	return &InstanceInfo{
-		Name:        m[2],
-		DisplayName: res.DisplayName,
-	}, nil
-}
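
A usage sketch for the removed admin surface (hypothetical project, instance, and table IDs; MaxAgePolicy comes from the package's gc.go, also deleted by this commit):

package main

import (
	"log"
	"time"

	"cloud.google.com/go/bigtable"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	ac, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance") // hypothetical IDs
	if err != nil {
		log.Fatal(err)
	}
	defer ac.Close()

	// Three tablets: [, g), [g, p), [p, ).
	if err := ac.CreatePresplitTable(ctx, "metrics", []string{"g", "p"}); err != nil {
		log.Fatal(err)
	}
	if err := ac.CreateColumnFamily(ctx, "metrics", "events"); err != nil {
		log.Fatal(err)
	}
	// Keep at most seven days of cells in the family.
	if err := ac.SetGCPolicy(ctx, "metrics", "events", bigtable.MaxAgePolicy(7*24*time.Hour)); err != nil {
		log.Fatal(err)
	}
}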

+ 0 - 792
vendor/cloud.google.com/go/bigtable/bigtable.go

@@ -1,792 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package bigtable // import "cloud.google.com/go/bigtable"
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-	"time"
-
-	"cloud.google.com/go/bigtable/internal/gax"
-	btopt "cloud.google.com/go/bigtable/internal/option"
-	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	gtransport "google.golang.org/api/transport/grpc"
-	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-const prodAddr = "bigtable.googleapis.com:443"
-
-// Client is a client for reading and writing data to tables in an instance.
-//
-// A Client is safe to use concurrently, except for its Close method.
-type Client struct {
-	conn              *grpc.ClientConn
-	client            btpb.BigtableClient
-	project, instance string
-}
-
-// NewClient creates a new Client for a given project and instance.
-func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
-	o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
-	if err != nil {
-		return nil, err
-	}
-	// Default to a small connection pool that can be overridden.
-	o = append(o,
-		option.WithGRPCConnectionPool(4),
-		// Set the max size to correspond to server-side limits.
-		option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))),
-		// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
-		// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
-		option.WithGRPCDialOption(grpc.WithBlock()))
-	o = append(o, opts...)
-	conn, err := gtransport.Dial(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-	return &Client{
-		conn:     conn,
-		client:   btpb.NewBigtableClient(conn),
-		project:  project,
-		instance: instance,
-	}, nil
-}
-
-// Close closes the Client.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-var (
-	idempotentRetryCodes  = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
-	isIdempotentRetryCode = make(map[codes.Code]bool)
-	retryOptions          = []gax.CallOption{
-		gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
-		gax.WithRetryCodes(idempotentRetryCodes),
-	}
-)
-
-func init() {
-	for _, code := range idempotentRetryCodes {
-		isIdempotentRetryCode[code] = true
-	}
-}
-
-func (c *Client) fullTableName(table string) string {
-	return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
-}
-
-// A Table refers to a table.
-//
-// A Table is safe to use concurrently.
-type Table struct {
-	c     *Client
-	table string
-
-	// Metadata to be sent with each request.
-	md metadata.MD
-}
-
-// Open opens a table.
-func (c *Client) Open(table string) *Table {
-	return &Table{
-		c:     c,
-		table: table,
-		md:    metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
-	}
-}
-
-// TODO(dsymonds): Read method that returns a sequence of ReadItems.
-
-// ReadRows reads rows from a table. f is called for each row.
-// If f returns false, the stream is shut down and ReadRows returns.
-// f owns its argument, and f is called serially in order by row key.
-//
-// By default, the yielded rows will contain all values in all cells.
-// Use RowFilter to limit the cells returned.
-func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
-	ctx = mergeOutgoingMetadata(ctx, t.md)
-
-	var prevRowKey string
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
-		req := &btpb.ReadRowsRequest{
-			TableName: t.c.fullTableName(t.table),
-			Rows:      arg.proto(),
-		}
-		for _, opt := range opts {
-			opt.set(req)
-		}
-		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
-		defer cancel()
-
-		stream, err := t.c.client.ReadRows(ctx, req)
-		if err != nil {
-			return err
-		}
-		cr := newChunkReader()
-		for {
-			res, err := stream.Recv()
-			if err == io.EOF {
-				break
-			}
-			if err != nil {
-				// Reset arg for next Invoke call.
-				arg = arg.retainRowsAfter(prevRowKey)
-				return err
-			}
-
-			for _, cc := range res.Chunks {
-				row, err := cr.Process(cc)
-				if err != nil {
-					// No need to prepare for a retry, this is an unretryable error.
-					return err
-				}
-				if row == nil {
-					continue
-				}
-				prevRowKey = row.Key()
-				if !f(row) {
-					// Cancel and drain stream.
-					cancel()
-					for {
-						if _, err := stream.Recv(); err != nil {
-							// The stream has ended. We don't return an error
-							// because the caller has intentionally interrupted the scan.
-							return nil
-						}
-					}
-				}
-			}
-			if err := cr.Close(); err != nil {
-				// No need to prepare for a retry, this is an unretryable error.
-				return err
-			}
-		}
-		return err
-	}, retryOptions...)
-
-	return err
-}
-
-// ReadRow is a convenience implementation of a single-row reader.
-// A missing row will return a zero-length map and a nil error.
-func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
-	var r Row
-	err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
-		r = rr
-		return true
-	}, opts...)
-	return r, err
-}
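
For flavor, a minimal scan over the streaming path above (invented table, prefix, and family names; LatestNFilter comes from the package's filter.go, also removed by this commit):

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigtable"
	"golang.org/x/net/context"
)

func scanUsers(ctx context.Context, tbl *bigtable.Table) error {
	// Latest cell per column only; stop after 100 rows.
	return tbl.ReadRows(ctx, bigtable.PrefixRange("user#"), func(r bigtable.Row) bool {
		for _, item := range r["profile"] { // ReadItems for the "profile" family
			fmt.Printf("%s %s = %q\n", item.Row, item.Column, item.Value)
		}
		return true // returning false cancels and drains the stream
	}, bigtable.RowFilter(bigtable.LatestNFilter(1)), bigtable.LimitRows(100))
}

func main() {
	ctx := context.Background()
	client, err := bigtable.NewClient(ctx, "my-project", "my-instance") // hypothetical
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	if err := scanUsers(ctx, client.Open("users")); err != nil {
		log.Fatal(err)
	}
}
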
-
-// decodeFamilyProto adds the cell data from f to the given row.
-func decodeFamilyProto(r Row, row string, f *btpb.Family) {
-	fam := f.Name // does not have colon
-	for _, col := range f.Columns {
-		for _, cell := range col.Cells {
-			ri := ReadItem{
-				Row:       row,
-				Column:    fam + ":" + string(col.Qualifier),
-				Timestamp: Timestamp(cell.TimestampMicros),
-				Value:     cell.Value,
-			}
-			r[fam] = append(r[fam], ri)
-		}
-	}
-}
-
-// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
-// The serialized size of the RowSet must be no larger than 1MiB.
-type RowSet interface {
-	proto() *btpb.RowSet
-
-	// retainRowsAfter returns a new RowSet that does not include the
-	// given row key or any row key lexicographically less than it.
-	retainRowsAfter(lastRowKey string) RowSet
-
-	// valid reports whether this set can cover at least one row.
-	valid() bool
-}
-
-// RowList is a sequence of row keys.
-type RowList []string
-
-func (r RowList) proto() *btpb.RowSet {
-	keys := make([][]byte, len(r))
-	for i, row := range r {
-		keys[i] = []byte(row)
-	}
-	return &btpb.RowSet{RowKeys: keys}
-}
-
-func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
-	var retryKeys RowList
-	for _, key := range r {
-		if key > lastRowKey {
-			retryKeys = append(retryKeys, key)
-		}
-	}
-	return retryKeys
-}
-
-func (r RowList) valid() bool {
-	return len(r) > 0
-}
-
-// A RowRange is a half-open interval [Start, Limit) encompassing
-// all the rows with keys at least as large as Start, and less than Limit.
-// (Bigtable string comparison is the same as Go's.)
-// A RowRange can be unbounded, encompassing all keys at least as large as Start.
-type RowRange struct {
-	start string
-	limit string
-}
-
-// NewRange returns the new RowRange [begin, end).
-func NewRange(begin, end string) RowRange {
-	return RowRange{
-		start: begin,
-		limit: end,
-	}
-}
-
-// Unbounded tests whether a RowRange is unbounded.
-func (r RowRange) Unbounded() bool {
-	return r.limit == ""
-}
-
-// Contains reports whether the RowRange contains the key.
-func (r RowRange) Contains(row string) bool {
-	return r.start <= row && (r.limit == "" || r.limit > row)
-}
-
-// String provides a printable description of a RowRange.
-func (r RowRange) String() string {
-	a := strconv.Quote(r.start)
-	if r.Unbounded() {
-		return fmt.Sprintf("[%s,∞)", a)
-	}
-	return fmt.Sprintf("[%s,%q)", a, r.limit)
-}
-
-func (r RowRange) proto() *btpb.RowSet {
-	rr := &btpb.RowRange{
-		StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)},
-	}
-	if !r.Unbounded() {
-		rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
-	}
-	return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
-}
-
-func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
-	if lastRowKey == "" || lastRowKey < r.start {
-		return r
-	}
-	// Set the beginning of the range to the row after the last scanned.
-	start := lastRowKey + "\x00"
-	if r.Unbounded() {
-		return InfiniteRange(start)
-	}
-	return NewRange(start, r.limit)
-}
-
-func (r RowRange) valid() bool {
-	return r.start < r.limit
-}
-
-// RowRangeList is a sequence of RowRanges representing the union of the ranges.
-type RowRangeList []RowRange
-
-func (r RowRangeList) proto() *btpb.RowSet {
-	ranges := make([]*btpb.RowRange, len(r))
-	for i, rr := range r {
-		// RowRange.proto() returns a RowSet with a single element RowRange array
-		ranges[i] = rr.proto().RowRanges[0]
-	}
-	return &btpb.RowSet{RowRanges: ranges}
-}
-
-func (r RowRangeList) retainRowsAfter(lastRowKey string) RowSet {
-	if lastRowKey == "" {
-		return r
-	}
-	// Return a list of any range that has not yet been completely processed
-	var ranges RowRangeList
-	for _, rr := range r {
-		retained := rr.retainRowsAfter(lastRowKey)
-		if retained.valid() {
-			ranges = append(ranges, retained.(RowRange))
-		}
-	}
-	return ranges
-}
-
-func (r RowRangeList) valid() bool {
-	for _, rr := range r {
-		if rr.valid() {
-			return true
-		}
-	}
-	return false
-}
-
-// SingleRow returns a RowSet for reading a single row.
-func SingleRow(row string) RowSet {
-	return RowList{row}
-}
-
-// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
-func PrefixRange(prefix string) RowRange {
-	return RowRange{
-		start: prefix,
-		limit: prefixSuccessor(prefix),
-	}
-}
-
-// InfiniteRange returns the RowRange consisting of all keys at least as
-// large as start.
-func InfiniteRange(start string) RowRange {
-	return RowRange{
-		start: start,
-		limit: "",
-	}
-}
-
-// prefixSuccessor returns the lexically smallest string greater than the
-// prefix, if it exists, or "" otherwise.  In either case, it is the string
-// needed for the Limit of a RowRange.
-func prefixSuccessor(prefix string) string {
-	if prefix == "" {
-		return "" // infinite range
-	}
-	n := len(prefix)
-	for n--; n >= 0 && prefix[n] == '\xff'; n-- {
-	}
-	if n == -1 {
-		return ""
-	}
-	ans := []byte(prefix[:n])
-	ans = append(ans, prefix[n]+1)
-	return string(ans)
-}
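
The carry logic in prefixSuccessor is easiest to see with concrete keys. A tiny sketch, relying only on the exported PrefixRange and RowRange.String above:

package main

import (
	"fmt"

	"cloud.google.com/go/bigtable"
)

func main() {
	// '$' is the byte after '#', so the prefix range ends at "user$".
	fmt.Println(bigtable.PrefixRange("user#")) // ["user#","user$")
	// A trailing 0xff cannot be incremented; the carry moves one byte left.
	fmt.Println(bigtable.PrefixRange("ab\xff")) // ["ab\xff","ac")
	// All-0xff prefixes have no successor, so the range is unbounded.
	fmt.Println(bigtable.PrefixRange("\xff")) // ["\xff",∞)
}
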
-
-// A ReadOption is an optional argument to ReadRows.
-type ReadOption interface {
-	set(req *btpb.ReadRowsRequest)
-}
-
-// RowFilter returns a ReadOption that applies f to the contents of read rows.
-//
-// If multiple RowFilters are provided, only the last is used. To combine filters,
-// use ChainFilters or InterleaveFilters instead.
-func RowFilter(f Filter) ReadOption { return rowFilter{f} }
-
-type rowFilter struct{ f Filter }
-
-func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }
-
-// LimitRows returns a ReadOption that will limit the number of rows to be read.
-func LimitRows(limit int64) ReadOption { return limitRows{limit} }
-
-type limitRows struct{ limit int64 }
-
-func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }
-
-// mutationsAreRetryable returns true if all mutations are idempotent
-// and therefore retryable. A mutation is idempotent iff every SetCell it
-// contains carries an explicit timestamp and does not rely on the server assigning one.
-func mutationsAreRetryable(muts []*btpb.Mutation) bool {
-	serverTime := int64(ServerTime)
-	for _, mut := range muts {
-		setCell := mut.GetSetCell()
-		if setCell != nil && setCell.TimestampMicros == serverTime {
-			return false
-		}
-	}
-	return true
-}
-
-// Apply applies a Mutation to a specific row.
-func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
-	ctx = mergeOutgoingMetadata(ctx, t.md)
-	after := func(res proto.Message) {
-		for _, o := range opts {
-			o.after(res)
-		}
-	}
-
-	var callOptions []gax.CallOption
-	if m.cond == nil {
-		req := &btpb.MutateRowRequest{
-			TableName: t.c.fullTableName(t.table),
-			RowKey:    []byte(row),
-			Mutations: m.ops,
-		}
-		if mutationsAreRetryable(m.ops) {
-			callOptions = retryOptions
-		}
-		var res *btpb.MutateRowResponse
-		err := gax.Invoke(ctx, func(ctx context.Context) error {
-			var err error
-			res, err = t.c.client.MutateRow(ctx, req)
-			return err
-		}, callOptions...)
-		if err == nil {
-			after(res)
-		}
-		return err
-	}
-
-	req := &btpb.CheckAndMutateRowRequest{
-		TableName:       t.c.fullTableName(t.table),
-		RowKey:          []byte(row),
-		PredicateFilter: m.cond.proto(),
-	}
-	if m.mtrue != nil {
-		req.TrueMutations = m.mtrue.ops
-	}
-	if m.mfalse != nil {
-		req.FalseMutations = m.mfalse.ops
-	}
-	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
-		callOptions = retryOptions
-	}
-	var cmRes *btpb.CheckAndMutateRowResponse
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
-		var err error
-		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
-		return err
-	}, callOptions...)
-	if err == nil {
-		after(cmRes)
-	}
-	return err
-}
-
-// An ApplyOption is an optional argument to Apply.
-type ApplyOption interface {
-	after(res proto.Message)
-}
-
-type applyAfterFunc func(res proto.Message)
-
-func (a applyAfterFunc) after(res proto.Message) { a(res) }
-
-// GetCondMutationResult returns an ApplyOption that reports whether the conditional
-// mutation's condition matched.
-func GetCondMutationResult(matched *bool) ApplyOption {
-	return applyAfterFunc(func(res proto.Message) {
-		if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
-			*matched = res.PredicateMatched
-		}
-	})
-}
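
A minimal sketch of both Apply forms, with placeholder project/instance/table/family names; the explicit timestamp in Set keeps the first mutation idempotent and therefore retryable per mutationsAreRetryable above:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		client, err := bigtable.NewClient(ctx, "my-project", "my-instance") // placeholders
		if err != nil {
			log.Fatal(err)
		}
		tbl := client.Open("mytable")

		// Unconditional mutation with an explicit timestamp (retryable).
		mut := bigtable.NewMutation()
		mut.Set("fam", "col", bigtable.Now(), []byte("v"))
		if err := tbl.Apply(ctx, "row1", mut); err != nil {
			log.Fatal(err)
		}

		// Conditional mutation: delete the row only if column "col" has a cell.
		del := bigtable.NewMutation()
		del.DeleteRow()
		cond := bigtable.NewCondMutation(bigtable.ColumnFilter("col"), del, nil)
		var matched bool
		if err := tbl.Apply(ctx, "row1", cond, bigtable.GetCondMutationResult(&matched)); err != nil {
			log.Fatal(err)
		}
		log.Printf("condition matched: %v", matched)
	}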
-
-// Mutation represents a set of changes for a single row of a table.
-type Mutation struct {
-	ops []*btpb.Mutation
-
-	// for conditional mutations
-	cond          Filter
-	mtrue, mfalse *Mutation
-}
-
-// NewMutation returns a new mutation.
-func NewMutation() *Mutation {
-	return new(Mutation)
-}
-
-// NewCondMutation returns a conditional mutation.
-// The given row filter determines which mutation is applied:
-// If the filter matches any cell in the row, mtrue is applied;
-// otherwise, mfalse is applied.
-// Either given mutation may be nil.
-func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
-	return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
-}
-
-// Set sets a value in a specified column, with the given timestamp.
-// The timestamp will be truncated to millisecond granularity.
-// A timestamp of ServerTime means to use the server timestamp.
-func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
-		FamilyName:      family,
-		ColumnQualifier: []byte(column),
-		TimestampMicros: int64(ts.TruncateToMilliseconds()),
-		Value:           value,
-	}}})
-}
-
-// DeleteCellsInColumn will delete all the cells whose columns are family:column.
-func (m *Mutation) DeleteCellsInColumn(family, column string) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
-		FamilyName:      family,
-		ColumnQualifier: []byte(column),
-	}}})
-}
-
-// DeleteTimestampRange deletes all cells whose columns are family:column
-// and whose timestamps are in the half-open interval [start, end).
-// If end is zero, it will be interpreted as infinity.
-// The timestamps will be truncated to millisecond granularity.
-func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
-		FamilyName:      family,
-		ColumnQualifier: []byte(column),
-		TimeRange: &btpb.TimestampRange{
-			StartTimestampMicros: int64(start.TruncateToMilliseconds()),
-			EndTimestampMicros:   int64(end.TruncateToMilliseconds()),
-		},
-	}}})
-}
-
-// DeleteCellsInFamily will delete all the cells whose columns are family:*.
-func (m *Mutation) DeleteCellsInFamily(family string) {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
-		FamilyName: family,
-	}}})
-}
-
-// DeleteRow deletes the entire row.
-func (m *Mutation) DeleteRow() {
-	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
-}
-
-// entryErr is a container that combines an entry with the error that was returned for it.
-// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
-type entryErr struct {
-	Entry *btpb.MutateRowsRequest_Entry
-	Err   error
-}
-
-// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
-// Each mutation is individually applied atomically,
-// but the set of mutations may be applied in any order.
-//
-// Two types of failures may occur. If the entire process
-// fails, (nil, err) will be returned. If specific mutations
-// fail to apply, ([]err, nil) will be returned, and the errors
-// will correspond to the relevant rowKeys/muts arguments.
-//
-// Conditional mutations cannot be applied in bulk and providing one will result in an error.
-func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
-	ctx = mergeOutgoingMetadata(ctx, t.md)
-	if len(rowKeys) != len(muts) {
-		return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
-	}
-
-	origEntries := make([]*entryErr, len(rowKeys))
-	for i, key := range rowKeys {
-		mut := muts[i]
-		if mut.cond != nil {
-			return nil, errors.New("conditional mutations cannot be applied in bulk")
-		}
-		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
-	}
-
-	// entries will be reduced after each invocation to just what needs to be retried.
-	entries := make([]*entryErr, len(rowKeys))
-	copy(entries, origEntries)
-	err := gax.Invoke(ctx, func(ctx context.Context) error {
-		err := t.doApplyBulk(ctx, entries, opts...)
-		if err != nil {
-			// We want to retry the entire request with the current entries
-			return err
-		}
-		entries = t.getApplyBulkRetries(entries)
-		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
-			// We have at least one mutation that needs to be retried.
-			// Return an arbitrary error that is retryable according to callOptions.
-			return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
-		}
-		return nil
-	}, retryOptions...)
-
-	if err != nil {
-		return nil, err
-	}
-
-	// Accumulate all of the errors into an array to return, interspersed with nils for successful
-	// entries. The absence of any errors means we should return nil.
-	var errs []error
-	var foundErr bool
-	for _, entry := range origEntries {
-		if entry.Err != nil {
-			foundErr = true
-		}
-		errs = append(errs, entry.Err)
-	}
-	if foundErr {
-		return errs, nil
-	}
-	return nil, nil
-}
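
A sketch of handling ApplyBulk's two failure modes, again with placeholder names:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	// bulkWrite writes one cell to each of the given rows and reports
	// per-entry failures.
	func bulkWrite(ctx context.Context, tbl *bigtable.Table, rowKeys []string) {
		muts := make([]*bigtable.Mutation, len(rowKeys))
		for i := range muts {
			m := bigtable.NewMutation()
			// An explicit timestamp keeps each entry idempotent, so failed
			// entries may be retried by the synthetic-error loop above.
			m.Set("fam", "col", bigtable.Now(), []byte("v"))
			muts[i] = m
		}
		errs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
		if err != nil {
			log.Fatal(err) // the whole request failed
		}
		for i, e := range errs { // errs is nil when every entry succeeded
			if e != nil {
				log.Printf("row %q failed: %v", rowKeys[i], e)
			}
		}
	}

	func main() {
		ctx := context.Background()
		client, err := bigtable.NewClient(ctx, "my-project", "my-instance") // placeholders
		if err != nil {
			log.Fatal(err)
		}
		bulkWrite(ctx, client.Open("mytable"), []string{"r1", "r2", "r3"})
	}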
-
-// getApplyBulkRetries returns the entries that need to be retried
-func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
-	var retryEntries []*entryErr
-	for _, entry := range entries {
-		err := entry.Err
-		if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
-			// There was an error and the entry is retryable.
-			retryEntries = append(retryEntries, entry)
-		}
-	}
-	return retryEntries
-}
-
-// doApplyBulk does the work of a single ApplyBulk invocation
-func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
-	after := func(res proto.Message) {
-		for _, o := range opts {
-			o.after(res)
-		}
-	}
-
-	entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
-	for i, entryErr := range entryErrs {
-		entries[i] = entryErr.Entry
-	}
-	req := &btpb.MutateRowsRequest{
-		TableName: t.c.fullTableName(t.table),
-		Entries:   entries,
-	}
-	stream, err := t.c.client.MutateRows(ctx, req)
-	if err != nil {
-		return err
-	}
-	for {
-		res, err := stream.Recv()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return err
-		}
-
-		for i, entry := range res.Entries {
-			status := entry.Status
-			if status.Code == int32(codes.OK) {
-				entryErrs[i].Err = nil
-			} else {
-				entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
-			}
-		}
-		after(res)
-	}
-	return nil
-}
-
-// Timestamp is in units of microseconds since 1 January 1970.
-type Timestamp int64
-
-// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
-// It indicates that the server's timestamp should be used.
-const ServerTime Timestamp = -1
-
-// Time converts a time.Time into a Timestamp.
-func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }
-
-// Now returns the Timestamp representation of the current time on the client.
-func Now() Timestamp { return Time(time.Now()) }
-
-// Time converts a Timestamp into a time.Time.
-func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }
-
-// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
-// which is currently the only granularity supported.
-func (ts Timestamp) TruncateToMilliseconds() Timestamp {
-	if ts == ServerTime {
-		return ts
-	}
-	return ts - ts%1000
-}
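
A small self-contained sketch of the conversions; the printed values follow by hand from the definitions above (microseconds since the Unix epoch, truncated to millisecond granularity):

	package main

	import (
		"fmt"
		"time"

		"cloud.google.com/go/bigtable"
	)

	func main() {
		t := time.Date(2017, 1, 2, 3, 4, 5, 678901234, time.UTC)
		ts := bigtable.Time(t)
		fmt.Println(int64(ts))                          // 1483326245678901 (µs)
		fmt.Println(int64(ts.TruncateToMilliseconds())) // 1483326245678000 (µs)
		fmt.Println(ts.Time().UTC())                    // 2017-01-02 03:04:05.678901 +0000 UTC

		// ServerTime survives truncation unchanged.
		fmt.Println(bigtable.ServerTime.TruncateToMilliseconds() == bigtable.ServerTime) // true
	}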
-
-// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
-// It returns the newly written cells.
-func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
-	ctx = mergeOutgoingMetadata(ctx, t.md)
-	req := &btpb.ReadModifyWriteRowRequest{
-		TableName: t.c.fullTableName(t.table),
-		RowKey:    []byte(row),
-		Rules:     m.ops,
-	}
-	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-	if res.Row == nil {
-		return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
-	}
-	r := make(Row)
-	for _, fam := range res.Row.Families { // res.Row is *btpb.Row; fam is *btpb.Family
-		decodeFamilyProto(r, row, fam)
-	}
-	return r, nil
-}
-
-// ReadModifyWrite represents a set of operations on a single row of a table.
-// It is like Mutation but for non-idempotent changes.
-// When applied, these operations operate on the latest values of the row's cells,
-// and result in a new value being written to the relevant cell with a timestamp
-// that is max(existing timestamp, current server time).
-//
-// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
-// be executed serially by the server.
-type ReadModifyWrite struct {
-	ops []*btpb.ReadModifyWriteRule
-}
-
-// NewReadModifyWrite returns a new ReadModifyWrite.
-func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }
-
-// AppendValue appends a value to a specific cell's value.
-// If the cell is unset, it will be treated as an empty value.
-func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
-	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
-		FamilyName:      family,
-		ColumnQualifier: []byte(column),
-		Rule:            &btpb.ReadModifyWriteRule_AppendValue{v},
-	})
-}
-
-// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
-// and adds a value to it. If the cell is unset, it will be treated as zero.
-// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
-// operation will fail.
-func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
-	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
-		FamilyName:      family,
-		ColumnQualifier: []byte(column),
-		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{delta},
-	})
-}
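
A sketch of an atomic counter built on Increment, with placeholder names as before; the Row returned by ApplyReadModifyWrite contains only the newly written cells:

	package main

	import (
		"encoding/binary"
		"fmt"
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		client, err := bigtable.NewClient(ctx, "my-project", "my-instance") // placeholders
		if err != nil {
			log.Fatal(err)
		}
		tbl := client.Open("mytable")

		rmw := bigtable.NewReadModifyWrite()
		rmw.Increment("fam", "counter", 1)
		row, err := tbl.ApplyReadModifyWrite(ctx, "row1", rmw)
		if err != nil {
			log.Fatal(err)
		}
		// Each new cell value is the counter as a big-endian int64.
		for _, item := range row["fam"] {
			fmt.Printf("%s = %d\n", item.Column, int64(binary.BigEndian.Uint64(item.Value)))
		}
	}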
-
-// mergeOutgoingMetadata returns a context populated by the existing outgoing metadata,
-// if any, joined with internal metadata.
-func mergeOutgoingMetadata(ctx context.Context, md metadata.MD) context.Context {
-	mdCopy, _ := metadata.FromOutgoingContext(ctx)
-	return metadata.NewOutgoingContext(ctx, metadata.Join(mdCopy, md))
-}

+ 0 - 1273
vendor/cloud.google.com/go/bigtable/bttest/inmem.go

@@ -1,1273 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package bttest contains test helpers for working with the bigtable package.
-
-To use a Server, create it, and then connect to it with no security:
-(The project/instance values are ignored.)
-	srv, err := bttest.NewServer("127.0.0.1:0")
-	...
-	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
-	...
-	client, err := bigtable.NewClient(ctx, proj, instance,
-	        option.WithGRPCConn(conn))
-	...
-*/
-package bttest // import "cloud.google.com/go/bigtable/bttest"
-
-import (
-	"encoding/binary"
-	"fmt"
-	"log"
-	"math/rand"
-	"net"
-	"regexp"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	"bytes"
-
-	emptypb "github.com/golang/protobuf/ptypes/empty"
-	"github.com/golang/protobuf/ptypes/wrappers"
-	"golang.org/x/net/context"
-	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
-	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
-	statpb "google.golang.org/genproto/googleapis/rpc/status"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-// Server is an in-memory Cloud Bigtable fake.
-// It is unauthenticated, and only a rough approximation.
-type Server struct {
-	Addr string
-
-	l   net.Listener
-	srv *grpc.Server
-	s   *server
-}
-
-// server is the real implementation of the fake.
-// It is a separate and unexported type so the API won't be cluttered with
-// methods that are only relevant to the fake's implementation.
-type server struct {
-	mu     sync.Mutex
-	tables map[string]*table // keyed by fully qualified name
-	gcc    chan int          // set when gcloop starts, closed when server shuts down
-
-	// Any unimplemented methods will cause a panic.
-	btapb.BigtableTableAdminServer
-	btpb.BigtableServer
-}
-
-// NewServer creates a new Server.
-// The Server will be listening for gRPC connections, without TLS,
-// on the provided address. The resolved address is named by the Addr field.
-func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) {
-	l, err := net.Listen("tcp", laddr)
-	if err != nil {
-		return nil, err
-	}
-
-	s := &Server{
-		Addr: l.Addr().String(),
-		l:    l,
-		srv:  grpc.NewServer(opt...),
-		s: &server{
-			tables: make(map[string]*table),
-		},
-	}
-	btapb.RegisterBigtableTableAdminServer(s.srv, s.s)
-	btpb.RegisterBigtableServer(s.srv, s.s)
-
-	go s.srv.Serve(s.l)
-
-	return s, nil
-}
-
-// Close shuts down the server.
-func (s *Server) Close() {
-	s.s.mu.Lock()
-	if s.s.gcc != nil {
-		close(s.s.gcc)
-	}
-	s.s.mu.Unlock()
-
-	s.srv.Stop()
-	s.l.Close()
-}
-
-func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) {
-	tbl := req.Parent + "/tables/" + req.TableId
-
-	s.mu.Lock()
-	if _, ok := s.tables[tbl]; ok {
-		s.mu.Unlock()
-		return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
-	}
-	s.tables[tbl] = newTable(req)
-	s.mu.Unlock()
-
-	return &btapb.Table{Name: tbl}, nil
-}
-
-func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) {
-	res := &btapb.ListTablesResponse{}
-	prefix := req.Parent + "/tables/"
-
-	s.mu.Lock()
-	for tbl := range s.tables {
-		if strings.HasPrefix(tbl, prefix) {
-			res.Tables = append(res.Tables, &btapb.Table{Name: tbl})
-		}
-	}
-	s.mu.Unlock()
-
-	return res, nil
-}
-
-func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) {
-	tbl := req.Name
-
-	s.mu.Lock()
-	tblIns, ok := s.tables[tbl]
-	s.mu.Unlock()
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl)
-	}
-
-	return &btapb.Table{
-		Name:           tbl,
-		ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()),
-	}, nil
-}
-
-func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	if _, ok := s.tables[req.Name]; !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
-	}
-	delete(s.tables, req.Name)
-	return &emptypb.Empty{}, nil
-}
-
-func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
-	tblName := req.Name[strings.LastIndex(req.Name, "/")+1:]
-
-	s.mu.Lock()
-	tbl, ok := s.tables[req.Name]
-	s.mu.Unlock()
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
-	}
-
-	tbl.mu.Lock()
-	defer tbl.mu.Unlock()
-
-	for _, mod := range req.Modifications {
-		if create := mod.GetCreate(); create != nil {
-			if _, ok := tbl.families[mod.Id]; ok {
-				return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
-			}
-			newcf := &columnFamily{
-				name:   req.Name + "/columnFamilies/" + mod.Id,
-				order:  tbl.counter,
-				gcRule: create.GcRule,
-			}
-			tbl.counter++
-			tbl.families[mod.Id] = newcf
-		} else if mod.GetDrop() {
-			if _, ok := tbl.families[mod.Id]; !ok {
-				return nil, fmt.Errorf("can't delete unknown family %q", mod.Id)
-			}
-			delete(tbl.families, mod.Id)
-		} else if modify := mod.GetUpdate(); modify != nil {
-			if _, ok := tbl.families[mod.Id]; !ok {
-				return nil, fmt.Errorf("no such family %q", mod.Id)
-			}
-			newcf := &columnFamily{
-				name:   req.Name + "/columnFamilies/" + mod.Id,
-				gcRule: modify.GcRule,
-			}
-			// Assume that we ALWAYS want to replace with the new setting;
-			// we may need partial updates, though.
-			tbl.families[mod.Id] = newcf
-		}
-	}
-
-	s.needGC()
-	return &btapb.Table{
-		Name:           tblName,
-		ColumnFamilies: toColumnFamilies(tbl.families),
-	}, nil
-}
-
-func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	tbl, ok := s.tables[req.Name]
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
-	}
-
-	if req.GetDeleteAllDataFromTable() {
-		tbl.rows = nil
-		tbl.rowIndex = make(map[string]*row)
-	} else {
-		// Delete rows by prefix
-		prefixBytes := req.GetRowKeyPrefix()
-		if prefixBytes == nil {
-			return nil, fmt.Errorf("missing row key prefix")
-		}
-		prefix := string(prefixBytes)
-
-		start := -1
-		end := 0
-		for i, row := range tbl.rows {
-			match := strings.HasPrefix(row.key, prefix)
-			if match {
-				// Delete the mapping. Row will be deleted from sorted range below.
-				delete(tbl.rowIndex, row.key)
-			}
-			if match && start == -1 {
-				start = i
-			} else if !match && start != -1 {
-				break
-			}
-			end++
-		}
-		if start != -1 {
-			// Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks
-			copy(tbl.rows[start:], tbl.rows[end:])
-			for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ {
-				tbl.rows[k] = nil
-			}
-			tbl.rows = tbl.rows[:len(tbl.rows)-end+start]
-		}
-	}
-
-	return &emptypb.Empty{}, nil
-}
-
-func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-
-	// Rows to read can be specified by a set of row keys and/or a set of row ranges.
-	// Output is a stream of sorted, de-duped rows.
-	tbl.mu.RLock()
-	rowSet := make(map[string]*row)
-	if req.Rows != nil {
-		// Add the explicitly given keys
-		for _, key := range req.Rows.RowKeys {
-			start := string(key)
-			addRows(start, start+"\x00", tbl, rowSet)
-		}
-
-		// Add keys from row ranges
-		for _, rr := range req.Rows.RowRanges {
-			var start, end string
-			switch sk := rr.StartKey.(type) {
-			case *btpb.RowRange_StartKeyClosed:
-				start = string(sk.StartKeyClosed)
-			case *btpb.RowRange_StartKeyOpen:
-				start = string(sk.StartKeyOpen) + "\x00"
-			}
-			switch ek := rr.EndKey.(type) {
-			case *btpb.RowRange_EndKeyClosed:
-				end = string(ek.EndKeyClosed) + "\x00"
-			case *btpb.RowRange_EndKeyOpen:
-				end = string(ek.EndKeyOpen)
-			}
-
-			addRows(start, end, tbl, rowSet)
-		}
-	} else {
-		// Read all rows
-		addRows("", "", tbl, rowSet)
-	}
-	tbl.mu.RUnlock()
-
-	rows := make([]*row, 0, len(rowSet))
-	for _, r := range rowSet {
-		rows = append(rows, r)
-	}
-	sort.Sort(byRowKey(rows))
-
-	limit := int(req.RowsLimit)
-	count := 0
-	for _, r := range rows {
-		if limit > 0 && count >= limit {
-			return nil
-		}
-		streamed, err := streamRow(stream, r, req.Filter)
-		if err != nil {
-			return err
-		}
-		if streamed {
-			count++
-		}
-	}
-	return nil
-}
-
-func addRows(start, end string, tbl *table, rowSet map[string]*row) {
-	si, ei := 0, len(tbl.rows) // half-open interval
-	if start != "" {
-		si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
-	}
-	if end != "" {
-		ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
-	}
-	if si < ei {
-		for _, row := range tbl.rows[si:ei] {
-			rowSet[row.key] = row
-		}
-	}
-}
-
-// streamRow filters the given row and sends it via the given stream.
-// Returns true if at least one cell matched the filter and was streamed, false otherwise.
-func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) {
-	r.mu.Lock()
-	nr := r.copy()
-	r.mu.Unlock()
-	r = nr
-
-	if !filterRow(f, r) {
-		return false, nil
-	}
-
-	rrr := &btpb.ReadRowsResponse{}
-	families := r.sortedFamilies()
-	for _, fam := range families {
-		for _, colName := range fam.colNames {
-			cells := fam.cells[colName]
-			if len(cells) == 0 {
-				continue
-			}
-			// TODO(dsymonds): Apply transformers.
-			for _, cell := range cells {
-				rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{
-					RowKey:          []byte(r.key),
-					FamilyName:      &wrappers.StringValue{Value: fam.name},
-					Qualifier:       &wrappers.BytesValue{Value: []byte(colName)},
-					TimestampMicros: cell.ts,
-					Value:           cell.value,
-				})
-			}
-		}
-	}
-	// We can't have a cell with just COMMIT set, which would imply a new empty cell.
-	// So modify the last cell to have the COMMIT flag set.
-	if len(rrr.Chunks) > 0 {
-		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true}
-	}
-
-	return true, stream.Send(rrr)
-}
-
-// filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches,
-// false otherwise.
-func filterRow(f *btpb.RowFilter, r *row) bool {
-	if f == nil {
-		return true
-	}
-	// Handle filters that apply beyond just including/excluding cells.
-	switch f := f.Filter.(type) {
-	case *btpb.RowFilter_Chain_:
-		for _, sub := range f.Chain.Filters {
-			if !filterRow(sub, r) {
-				return false
-			}
-		}
-		return true
-	case *btpb.RowFilter_Interleave_:
-		srs := make([]*row, 0, len(f.Interleave.Filters))
-		for _, sub := range f.Interleave.Filters {
-			sr := r.copy()
-			filterRow(sub, sr)
-			srs = append(srs, sr)
-		}
-		// merge
-		// TODO(dsymonds): is this correct?
-		r.families = make(map[string]*family)
-		for _, sr := range srs {
-			for _, fam := range sr.families {
-				f := r.getOrCreateFamily(fam.name, fam.order)
-				for colName, cs := range fam.cells {
-					f.cells[colName] = append(f.cellsByColumn(colName), cs...)
-				}
-			}
-		}
-		var count int
-		for _, fam := range r.families {
-			for _, cs := range fam.cells {
-				sort.Sort(byDescTS(cs))
-				count += len(cs)
-			}
-		}
-		return count > 0
-	case *btpb.RowFilter_CellsPerColumnLimitFilter:
-		lim := int(f.CellsPerColumnLimitFilter)
-		for _, fam := range r.families {
-			for col, cs := range fam.cells {
-				if len(cs) > lim {
-					fam.cells[col] = cs[:lim]
-				}
-			}
-		}
-		return true
-	case *btpb.RowFilter_Condition_:
-		if filterRow(f.Condition.PredicateFilter, r.copy()) {
-			if f.Condition.TrueFilter == nil {
-				return false
-			}
-			return filterRow(f.Condition.TrueFilter, r)
-		}
-		if f.Condition.FalseFilter == nil {
-			return false
-		}
-		return filterRow(f.Condition.FalseFilter, r)
-	case *btpb.RowFilter_RowKeyRegexFilter:
-		pat := string(f.RowKeyRegexFilter)
-		rx, err := regexp.Compile(pat)
-		if err != nil {
-			log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err)
-			return false
-		}
-		if !rx.MatchString(r.key) {
-			return false
-		}
-	case *btpb.RowFilter_CellsPerRowLimitFilter:
-		// Grab the first n cells in the row.
-		lim := int(f.CellsPerRowLimitFilter)
-		for _, fam := range r.families {
-			for _, col := range fam.colNames {
-				cs := fam.cells[col]
-				if len(cs) > lim {
-					fam.cells[col] = cs[:lim]
-					lim = 0
-				} else {
-					lim -= len(cs)
-				}
-			}
-		}
-		return true
-	case *btpb.RowFilter_CellsPerRowOffsetFilter:
-		// Skip the first n cells in the row.
-		offset := int(f.CellsPerRowOffsetFilter)
-		for _, fam := range r.families {
-			for _, col := range fam.colNames {
-				cs := fam.cells[col]
-				if len(cs) > offset {
-					fam.cells[col] = cs[offset:]
-					offset = 0
-					return true
-				} else {
-					fam.cells[col] = cs[:0]
-					offset -= len(cs)
-				}
-			}
-		}
-		return true
-	}
-
-	// Any other case, operate on a per-cell basis.
-	cellCount := 0
-	for _, fam := range r.families {
-		for colName, cs := range fam.cells {
-			fam.cells[colName] = filterCells(f, fam.name, colName, cs)
-			cellCount += len(fam.cells[colName])
-		}
-	}
-	return cellCount > 0
-}
-
-func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell {
-	var ret []cell
-	for _, cell := range cs {
-		if includeCell(f, fam, col, cell) {
-			cell = modifyCell(f, cell)
-			ret = append(ret, cell)
-		}
-	}
-	return ret
-}
-
-func modifyCell(f *btpb.RowFilter, c cell) cell {
-	if f == nil {
-		return c
-	}
-	// Consider filters that may modify the cell contents
-	switch f.Filter.(type) {
-	case *btpb.RowFilter_StripValueTransformer:
-		return cell{ts: c.ts}
-	default:
-		return c
-	}
-}
-
-func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool {
-	if f == nil {
-		return true
-	}
-	// TODO(dsymonds): Implement many more filters.
-	switch f := f.Filter.(type) {
-	case *btpb.RowFilter_CellsPerColumnLimitFilter:
-		// Don't log, row-level filter
-		return true
-	case *btpb.RowFilter_RowKeyRegexFilter:
-		// Don't log, row-level filter
-		return true
-	case *btpb.RowFilter_StripValueTransformer:
-		// Don't log, cell-modifying filter
-		return true
-	default:
-		log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f)
-		return true
-	case *btpb.RowFilter_FamilyNameRegexFilter:
-		pat := string(f.FamilyNameRegexFilter)
-		rx, err := regexp.Compile(pat)
-		if err != nil {
-			log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err)
-			return false
-		}
-		return rx.MatchString(fam)
-	case *btpb.RowFilter_ColumnQualifierRegexFilter:
-		pat := string(f.ColumnQualifierRegexFilter)
-		rx, err := regexp.Compile(pat)
-		if err != nil {
-			log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err)
-			return false
-		}
-		return rx.MatchString(col)
-	case *btpb.RowFilter_ValueRegexFilter:
-		pat := string(f.ValueRegexFilter)
-		rx, err := regexp.Compile(pat)
-		if err != nil {
-			log.Printf("Bad value_regex_filter pattern %q: %v", pat, err)
-			return false
-		}
-		return rx.Match(cell.value)
-	case *btpb.RowFilter_ColumnRangeFilter:
-		if fam != f.ColumnRangeFilter.FamilyName {
-			return false
-		}
-		// Start qualifier defaults to empty string closed
-		inRangeStart := func() bool { return col >= "" }
-		switch sq := f.ColumnRangeFilter.StartQualifier.(type) {
-		case *btpb.ColumnRange_StartQualifierOpen:
-			inRangeStart = func() bool { return col > string(sq.StartQualifierOpen) }
-		case *btpb.ColumnRange_StartQualifierClosed:
-			inRangeStart = func() bool { return col >= string(sq.StartQualifierClosed) }
-		}
-		// End qualifier defaults to no upper boundary
-		inRangeEnd := func() bool { return true }
-		switch eq := f.ColumnRangeFilter.EndQualifier.(type) {
-		case *btpb.ColumnRange_EndQualifierClosed:
-			inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) }
-		case *btpb.ColumnRange_EndQualifierOpen:
-			inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) }
-		}
-		return inRangeStart() && inRangeEnd()
-	case *btpb.RowFilter_TimestampRangeFilter:
-		// Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity.
-		return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros &&
-			(f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros)
-	case *btpb.RowFilter_ValueRangeFilter:
-		v := cell.value
-		// Start value defaults to empty string closed
-		inRangeStart := func() bool { return bytes.Compare(v, []byte{}) >= 0 }
-		switch sv := f.ValueRangeFilter.StartValue.(type) {
-		case *btpb.ValueRange_StartValueOpen:
-			inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueOpen) > 0 }
-		case *btpb.ValueRange_StartValueClosed:
-			inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueClosed) >= 0 }
-		}
-		// End value defaults to no upper boundary
-		inRangeEnd := func() bool { return true }
-		switch ev := f.ValueRangeFilter.EndValue.(type) {
-		case *btpb.ValueRange_EndValueClosed:
-			inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueClosed) <= 0 }
-		case *btpb.ValueRange_EndValueOpen:
-			inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 }
-		}
-		return inRangeStart() && inRangeEnd()
-	}
-}
-
-func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-	fs := tbl.columnFamilies()
-	r, _ := tbl.mutableRow(string(req.RowKey))
-	r.mu.Lock()
-	defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
-	defer r.mu.Unlock()
-	if err := applyMutations(tbl, r, req.Mutations, fs); err != nil {
-		return nil, err
-	}
-	return &btpb.MutateRowResponse{}, nil
-}
-
-func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-	res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
-
-	fs := tbl.columnFamilies()
-
-	defer tbl.resortRowIndex()
-	for i, entry := range req.Entries {
-		r, _ := tbl.mutableRow(string(entry.RowKey))
-		r.mu.Lock()
-		code, msg := int32(codes.OK), ""
-		if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
-			code = int32(codes.Internal)
-			msg = err.Error()
-		}
-		res.Entries[i] = &btpb.MutateRowsResponse_Entry{
-			Index:  int64(i),
-			Status: &statpb.Status{Code: code, Message: msg},
-		}
-		r.mu.Unlock()
-	}
-	stream.Send(res)
-	return nil
-}
-
-func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-	res := &btpb.CheckAndMutateRowResponse{}
-
-	fs := tbl.columnFamilies()
-
-	r, _ := tbl.mutableRow(string(req.RowKey))
-	r.mu.Lock()
-	defer r.mu.Unlock()
-
-	// Figure out which mutation to apply.
-	whichMut := false
-	if req.PredicateFilter == nil {
-		// Use true_mutations iff the row contains any cells.
-		whichMut = !r.isEmpty()
-	} else {
-		// Use true_mutations iff any cells in the row match the filter.
-		// TODO(dsymonds): This could be cheaper.
-		nr := r.copy()
-		filterRow(req.PredicateFilter, nr)
-		whichMut = !nr.isEmpty()
-	}
-	res.PredicateMatched = whichMut
-	muts := req.FalseMutations
-	if whichMut {
-		muts = req.TrueMutations
-	}
-
-	defer tbl.resortRowIndex()
-	if err := applyMutations(tbl, r, muts, fs); err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
-// applyMutations applies a sequence of mutations to a row.
-// fs should be a snapshot of tbl.families.
-// It assumes r.mu is locked.
-func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error {
-	for _, mut := range muts {
-		switch mut := mut.Mutation.(type) {
-		default:
-			return fmt.Errorf("can't handle mutation type %T", mut)
-		case *btpb.Mutation_SetCell_:
-			set := mut.SetCell
-			if _, ok := fs[set.FamilyName]; !ok {
-				return fmt.Errorf("unknown family %q", set.FamilyName)
-			}
-			ts := set.TimestampMicros
-			if ts == -1 { // bigtable.ServerTime
-				ts = newTimestamp()
-			}
-			if !tbl.validTimestamp(ts) {
-				return fmt.Errorf("invalid timestamp %d", ts)
-			}
-			fam := set.FamilyName
-			col := string(set.ColumnQualifier)
-
-			newCell := cell{ts: ts, value: set.Value}
-			f := r.getOrCreateFamily(fam, fs[fam].order)
-			f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
-		case *btpb.Mutation_DeleteFromColumn_:
-			del := mut.DeleteFromColumn
-			if _, ok := fs[del.FamilyName]; !ok {
-				return fmt.Errorf("unknown family %q", del.FamilyName)
-			}
-			fam := del.FamilyName
-			col := string(del.ColumnQualifier)
-			if _, ok := r.families[fam]; ok {
-				cs := r.families[fam].cells[col]
-				if del.TimeRange != nil {
-					tsr := del.TimeRange
-					if !tbl.validTimestamp(tsr.StartTimestampMicros) {
-						return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros)
-					}
-					if !tbl.validTimestamp(tsr.EndTimestampMicros) {
-						return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros)
-					}
-					// Find half-open interval to remove.
-					// Cells are in descending timestamp order,
-					// so the predicates to sort.Search are inverted.
-					si, ei := 0, len(cs)
-					if tsr.StartTimestampMicros > 0 {
-						ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros })
-					}
-					if tsr.EndTimestampMicros > 0 {
-						si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros })
-					}
-					if si < ei {
-						copy(cs[si:], cs[ei:])
-						cs = cs[:len(cs)-(ei-si)]
-					}
-				} else {
-					cs = nil
-				}
-				if len(cs) == 0 {
-					delete(r.families[fam].cells, col)
-					colNames := r.families[fam].colNames
-					i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col })
-					if i < len(colNames) && colNames[i] == col {
-						r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...)
-					}
-					if len(r.families[fam].cells) == 0 {
-						delete(r.families, fam)
-					}
-				} else {
-					r.families[fam].cells[col] = cs
-				}
-			}
-		case *btpb.Mutation_DeleteFromRow_:
-			r.families = make(map[string]*family)
-		case *btpb.Mutation_DeleteFromFamily_:
-			fampre := mut.DeleteFromFamily.FamilyName
-			delete(r.families, fampre)
-		}
-	}
-	return nil
-}
-
-func maxTimestamp(x, y int64) int64 {
-	if x > y {
-		return x
-	}
-	return y
-}
-
-func newTimestamp() int64 {
-	ts := time.Now().UnixNano() / 1e3
-	ts -= ts % 1000 // round to millisecond granularity
-	return ts
-}
-
-func appendOrReplaceCell(cs []cell, newCell cell) []cell {
-	replaced := false
-	for i, cell := range cs {
-		if cell.ts == newCell.ts {
-			cs[i] = newCell
-			replaced = true
-			break
-		}
-	}
-	if !replaced {
-		cs = append(cs, newCell)
-	}
-	sort.Sort(byDescTS(cs))
-	return cs
-}
-
-func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-	updates := make(map[string]cell) // copy of updated cells; keyed by full column name
-
-	fs := tbl.columnFamilies()
-
-	rowKey := string(req.RowKey)
-	r, isNewRow := tbl.mutableRow(rowKey)
-	// This must be done before the row lock, acquired below, is released.
-	if isNewRow {
-		defer tbl.resortRowIndex()
-	}
-	r.mu.Lock()
-	defer r.mu.Unlock()
-	// Assume all mutations apply to the most recent version of the cell.
-	// TODO(dsymonds): Verify this assumption and document it in the proto.
-	for _, rule := range req.Rules {
-		if _, ok := fs[rule.FamilyName]; !ok {
-			return nil, fmt.Errorf("unknown family %q", rule.FamilyName)
-		}
-
-		fam := rule.FamilyName
-		col := string(rule.ColumnQualifier)
-		isEmpty := false
-		f := r.getOrCreateFamily(fam, fs[fam].order)
-		cs := f.cells[col]
-		isEmpty = len(cs) == 0
-
-		ts := newTimestamp()
-		var newCell, prevCell cell
-		if !isEmpty {
-			cells := r.families[fam].cells[col]
-			prevCell = cells[0]
-
-			// ts is the max of now and the previous cell's timestamp,
-			// in case the previous cell's timestamp is in the future.
-			ts = maxTimestamp(ts, prevCell.ts)
-		}
-
-		switch rule := rule.Rule.(type) {
-		default:
-			return nil, fmt.Errorf("unknown RMW rule oneof %T", rule)
-		case *btpb.ReadModifyWriteRule_AppendValue:
-			newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)}
-		case *btpb.ReadModifyWriteRule_IncrementAmount:
-			var v int64
-			if !isEmpty {
-				prevVal := prevCell.value
-				if len(prevVal) != 8 {
-					return nil, fmt.Errorf("increment on non-64-bit value")
-				}
-				v = int64(binary.BigEndian.Uint64(prevVal))
-			}
-			v += rule.IncrementAmount
-			var val [8]byte
-			binary.BigEndian.PutUint64(val[:], uint64(v))
-			newCell = cell{ts: ts, value: val[:]}
-		}
-		key := strings.Join([]string{fam, col}, ":")
-		updates[key] = newCell
-		f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell)
-	}
-
-	res := &btpb.Row{
-		Key: req.RowKey,
-	}
-	for col, cell := range updates {
-		i := strings.Index(col, ":")
-		fam, qual := col[:i], col[i+1:]
-		var f *btpb.Family
-		for _, ff := range res.Families {
-			if ff.Name == fam {
-				f = ff
-				break
-			}
-		}
-		if f == nil {
-			f = &btpb.Family{Name: fam}
-			res.Families = append(res.Families, f)
-		}
-		f.Columns = append(f.Columns, &btpb.Column{
-			Qualifier: []byte(qual),
-			Cells: []*btpb.Cell{{
-				TimestampMicros: cell.ts,
-				Value:           cell.value,
-			}},
-		})
-	}
-	return &btpb.ReadModifyWriteRowResponse{Row: res}, nil
-}
-
-func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error {
-	s.mu.Lock()
-	tbl, ok := s.tables[req.TableName]
-	s.mu.Unlock()
-	if !ok {
-		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
-	}
-
-	tbl.mu.RLock()
-	defer tbl.mu.RUnlock()
-
-	// The return value of SampleRowKeys is very loosely defined. Return at least the
-	// final row key in the table and choose other row keys randomly.
-	var offset int64
-	for i, row := range tbl.rows {
-		if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 {
-			resp := &btpb.SampleRowKeysResponse{
-				RowKey:      []byte(row.key),
-				OffsetBytes: offset,
-			}
-			err := stream.Send(resp)
-			if err != nil {
-				return err
-			}
-		}
-		offset += int64(row.size())
-	}
-	return nil
-}
-
-// needGC is invoked whenever the server needs gcloop running.
-func (s *server) needGC() {
-	s.mu.Lock()
-	if s.gcc == nil {
-		s.gcc = make(chan int)
-		go s.gcloop(s.gcc)
-	}
-	s.mu.Unlock()
-}
-
-func (s *server) gcloop(done <-chan int) {
-	const (
-		minWait = 500  // ms
-		maxWait = 1500 // ms
-	)
-
-	for {
-		// Wait for a random time interval.
-		d := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond
-		select {
-		case <-time.After(d):
-		case <-done:
-			return // server has been closed
-		}
-
-		// Do a GC pass over all tables.
-		var tables []*table
-		s.mu.Lock()
-		for _, tbl := range s.tables {
-			tables = append(tables, tbl)
-		}
-		s.mu.Unlock()
-		for _, tbl := range tables {
-			tbl.gc()
-		}
-	}
-}
-
-type table struct {
-	mu       sync.RWMutex
-	counter  uint64                   // increment by 1 when a new family is created
-	families map[string]*columnFamily // keyed by plain family name
-	rows     []*row                   // sorted by row key
-	rowIndex map[string]*row          // indexed by row key
-}
-
-func newTable(ctr *btapb.CreateTableRequest) *table {
-	fams := make(map[string]*columnFamily)
-	c := uint64(0)
-	if ctr.Table != nil {
-		for id, cf := range ctr.Table.ColumnFamilies {
-			fams[id] = &columnFamily{
-				name:   ctr.Parent + "/columnFamilies/" + id,
-				order:  c,
-				gcRule: cf.GcRule,
-			}
-			c++
-		}
-	}
-	return &table{
-		families: fams,
-		counter:  c,
-		rowIndex: make(map[string]*row),
-	}
-}
-
-func (t *table) validTimestamp(ts int64) bool {
-	// Assume millisecond granularity is required.
-	return ts%1000 == 0
-}
-
-func (t *table) columnFamilies() map[string]*columnFamily {
-	cp := make(map[string]*columnFamily)
-	t.mu.RLock()
-	for fam, cf := range t.families {
-		cp[fam] = cf
-	}
-	t.mu.RUnlock()
-	return cp
-}
-
-func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
-	// Try fast path first.
-	t.mu.RLock()
-	r := t.rowIndex[row]
-	t.mu.RUnlock()
-	if r != nil {
-		return r, false
-	}
-
-	// We probably need to create the row.
-	t.mu.Lock()
-	r = t.rowIndex[row]
-	if r == nil {
-		r = newRow(row)
-		t.rowIndex[row] = r
-		t.rows = append(t.rows, r)
-	}
-	t.mu.Unlock()
-	return r, true
-}
-
-func (t *table) resortRowIndex() {
-	t.mu.Lock()
-	sort.Sort(byRowKey(t.rows))
-	t.mu.Unlock()
-}
-
-func (t *table) gc() {
-	// This method doesn't add or remove rows, so we only need a read lock for the table.
-	t.mu.RLock()
-	defer t.mu.RUnlock()
-
-	// Gather GC rules we'll apply.
-	rules := make(map[string]*btapb.GcRule) // keyed by "fam"
-	for fam, cf := range t.families {
-		if cf.gcRule != nil {
-			rules[fam] = cf.gcRule
-		}
-	}
-	if len(rules) == 0 {
-		return
-	}
-
-	for _, r := range t.rows {
-		r.mu.Lock()
-		r.gc(rules)
-		r.mu.Unlock()
-	}
-}
-
-type byRowKey []*row
-
-func (b byRowKey) Len() int           { return len(b) }
-func (b byRowKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key }
-
-type row struct {
-	key string
-
-	mu       sync.Mutex
-	families map[string]*family // keyed by family name
-}
-
-func newRow(key string) *row {
-	return &row{
-		key:      key,
-		families: make(map[string]*family),
-	}
-}
-
-// copy returns a copy of the row.
-// Cell values are aliased.
-// r.mu should be held.
-func (r *row) copy() *row {
-	nr := newRow(r.key)
-	for _, fam := range r.families {
-		nr.families[fam.name] = &family{
-			name:     fam.name,
-			order:    fam.order,
-			colNames: fam.colNames,
-			cells:    make(map[string][]cell),
-		}
-		for col, cs := range fam.cells {
-			// Copy the []cell slice, but not the []byte inside each cell.
-			nr.families[fam.name].cells[col] = append([]cell(nil), cs...)
-		}
-	}
-	return nr
-}
-
-// isEmpty returns true if the row doesn't contain any cells.
-func (r *row) isEmpty() bool {
-	for _, fam := range r.families {
-		for _, cs := range fam.cells {
-			if len(cs) > 0 {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// sortedFamilies returns the row's column families
-// sorted in ascending creation order.
-func (r *row) sortedFamilies() []*family {
-	var families []*family
-	for _, fam := range r.families {
-		families = append(families, fam)
-	}
-	sort.Sort(byCreationOrder(families))
-	return families
-}
-
-func (r *row) getOrCreateFamily(name string, order uint64) *family {
-	if _, ok := r.families[name]; !ok {
-		r.families[name] = &family{
-			name:  name,
-			order: order,
-			cells: make(map[string][]cell),
-		}
-	}
-	return r.families[name]
-}
-
-// gc applies the given GC rules to the row.
-// r.mu should be held.
-func (r *row) gc(rules map[string]*btapb.GcRule) {
-	for _, fam := range r.families {
-		rule, ok := rules[fam.name]
-		if !ok {
-			continue
-		}
-		for col, cs := range fam.cells {
-			r.families[fam.name].cells[col] = applyGC(cs, rule)
-		}
-	}
-}
-
-// size returns the total size of all cell values in the row.
-func (r *row) size() int {
-	size := 0
-	for _, fam := range r.families {
-		for _, cells := range fam.cells {
-			for _, cell := range cells {
-				size += len(cell.value)
-			}
-		}
-	}
-	return size
-}
-
-func (r *row) String() string {
-	return r.key
-}
-
-var gcTypeWarn sync.Once
-
-// applyGC applies the given GC rule to the cells.
-func applyGC(cells []cell, rule *btapb.GcRule) []cell {
-	switch rule := rule.Rule.(type) {
-	default:
-		// TODO(dsymonds): Support GcRule_Intersection_
-		gcTypeWarn.Do(func() {
-			log.Printf("Unsupported GC rule type %T", rule)
-		})
-	case *btapb.GcRule_Union_:
-		for _, sub := range rule.Union.Rules {
-			cells = applyGC(cells, sub)
-		}
-		return cells
-	case *btapb.GcRule_MaxAge:
-		// Timestamps are in microseconds.
-		cutoff := time.Now().UnixNano() / 1e3
-		cutoff -= rule.MaxAge.Seconds * 1e6
-		cutoff -= int64(rule.MaxAge.Nanos) / 1e3
-		// The slice of cells is in descending timestamp order.
-		// This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff.
-		si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff })
-		if si < len(cells) {
-			log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si)
-		}
-		return cells[:si]
-	case *btapb.GcRule_MaxNumVersions:
-		n := int(rule.MaxNumVersions)
-		if len(cells) > n {
-			cells = cells[:n]
-		}
-		return cells
-	}
-	return cells
-}
-
-type family struct {
-	name     string            // Column family name
-	order    uint64            // Creation order of column family
-	colNames []string          // Column names are sorted in lexicographical ascending order
-	cells    map[string][]cell // Keyed by column name; cells are in descending timestamp order
-}
-
-type byCreationOrder []*family
-
-func (b byCreationOrder) Len() int           { return len(b) }
-func (b byCreationOrder) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order }
-
-// cellsByColumn adds the column name to the colNames set if it does not exist
-// and returns all cells within that column.
-func (f *family) cellsByColumn(name string) []cell {
-	if _, ok := f.cells[name]; !ok {
-		f.colNames = append(f.colNames, name)
-		sort.Strings(f.colNames)
-	}
-	return f.cells[name]
-}
-
-type cell struct {
-	ts    int64
-	value []byte
-}
-
-type byDescTS []cell
-
-func (b byDescTS) Len() int           { return len(b) }
-func (b byDescTS) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts }
-
-type columnFamily struct {
-	name   string
-	order  uint64 // Creation order of column family
-	gcRule *btapb.GcRule
-}
-
-func (c *columnFamily) proto() *btapb.ColumnFamily {
-	return &btapb.ColumnFamily{
-		GcRule: c.gcRule,
-	}
-}
-
-func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily {
-	fs := make(map[string]*btapb.ColumnFamily)
-	for k, v := range families {
-		fs[k] = v.proto()
-	}
-	return fs
-}
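
The package comment's snippet expands into a complete round trip against the fake. A sketch under the assumption that the surrounding bigtable package provides NewAdminClient, CreateTable, CreateColumnFamily, and ReadRow as used by the cbt tool elsewhere in this vendor tree; the fake ignores the "proj"/"instance" values, and the table and family names are arbitrary:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/bigtable"
		"cloud.google.com/go/bigtable/bttest"
		"golang.org/x/net/context"
		"google.golang.org/api/option"
		"google.golang.org/grpc"
	)

	func main() {
		srv, err := bttest.NewServer("127.0.0.1:0")
		if err != nil {
			log.Fatal(err)
		}
		defer srv.Close()

		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}
		ctx := context.Background()

		admin, err := bigtable.NewAdminClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
		if err != nil {
			log.Fatal(err)
		}
		if err := admin.CreateTable(ctx, "example"); err != nil {
			log.Fatal(err)
		}
		if err := admin.CreateColumnFamily(ctx, "example", "fam"); err != nil {
			log.Fatal(err)
		}

		client, err := bigtable.NewClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
		if err != nil {
			log.Fatal(err)
		}
		tbl := client.Open("example")

		mut := bigtable.NewMutation()
		mut.Set("fam", "col", bigtable.Now(), []byte("hello"))
		if err := tbl.Apply(ctx, "row1", mut); err != nil {
			log.Fatal(err)
		}

		row, err := tbl.ReadRow(ctx, "row1")
		if err != nil {
			log.Fatal(err)
		}
		for _, item := range row["fam"] {
			fmt.Printf("%s = %s\n", item.Column, item.Value)
		}
	}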

+ 0 - 842
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go

@@ -1,842 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-// Command docs are in cbtdoc.go.
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"go/format"
-	"io"
-	"log"
-	"os"
-	"regexp"
-	"sort"
-	"strconv"
-	"strings"
-	"text/tabwriter"
-	"text/template"
-	"time"
-
-	"cloud.google.com/go/bigtable"
-	"cloud.google.com/go/bigtable/internal/cbtconfig"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/grpc"
-)
-
-var (
-	oFlag = flag.String("o", "", "if set, redirect stdout to this file")
-
-	config              *cbtconfig.Config
-	client              *bigtable.Client
-	adminClient         *bigtable.AdminClient
-	instanceAdminClient *bigtable.InstanceAdminClient
-
-	version      = "<unknown version>"
-	revision     = "<unknown revision>"
-	revisionDate = "<unknown revision date>"
-)
-
-func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
-	if ts := config.TokenSource; ts != nil {
-		opts = append(opts, option.WithTokenSource(ts))
-	}
-	if tlsCreds := config.TLSCreds; tlsCreds != nil {
-		opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds)))
-	}
-	return opts
-}
-
-func getClient() *bigtable.Client {
-	if client == nil {
-		var opts []option.ClientOption
-		if ep := config.DataEndpoint; ep != "" {
-			opts = append(opts, option.WithEndpoint(ep))
-		}
-		opts = getCredentialOpts(opts)
-		var err error
-		client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
-		if err != nil {
-			log.Fatalf("Making bigtable.Client: %v", err)
-		}
-	}
-	return client
-}
-
-func getAdminClient() *bigtable.AdminClient {
-	if adminClient == nil {
-		var opts []option.ClientOption
-		if ep := config.AdminEndpoint; ep != "" {
-			opts = append(opts, option.WithEndpoint(ep))
-		}
-		opts = getCredentialOpts(opts)
-		var err error
-		adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...)
-		if err != nil {
-			log.Fatalf("Making bigtable.AdminClient: %v", err)
-		}
-	}
-	return adminClient
-}
-
-func getInstanceAdminClient() *bigtable.InstanceAdminClient {
-	if instanceAdminClient == nil {
-		var opts []option.ClientOption
-		if ep := config.AdminEndpoint; ep != "" {
-			opts = append(opts, option.WithEndpoint(ep))
-		}
-		opts = getCredentialOpts(opts)
-		var err error
-		instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...)
-		if err != nil {
-			log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
-		}
-	}
-	return instanceAdminClient
-}
-
-func main() {
-	var err error
-	config, err = cbtconfig.Load()
-	if err != nil {
-		log.Fatal(err)
-	}
-	config.RegisterFlags()
-
-	flag.Usage = func() { usage(os.Stderr) }
-	flag.Parse()
-	if flag.NArg() == 0 {
-		usage(os.Stderr)
-		os.Exit(1)
-	}
-
-	if *oFlag != "" {
-		f, err := os.Create(*oFlag)
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer func() {
-			if err := f.Close(); err != nil {
-				log.Fatal(err)
-			}
-		}()
-		os.Stdout = f
-	}
-
-	ctx := context.Background()
-	for _, cmd := range commands {
-		if cmd.Name == flag.Arg(0) {
-			if err := config.CheckFlags(cmd.Required); err != nil {
-				log.Fatal(err)
-			}
-			cmd.do(ctx, flag.Args()[1:]...)
-			return
-		}
-	}
-	log.Fatalf("Unknown command %q", flag.Arg(0))
-}
-
-func usage(w io.Writer) {
-	fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
-	flag.CommandLine.SetOutput(w)
-	flag.CommandLine.PrintDefaults()
-	fmt.Fprintf(w, "\n%s", cmdSummary)
-}
-
-var cmdSummary string // generated in init, below
-
-func init() {
-	var buf bytes.Buffer
-	tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
-	for _, cmd := range commands {
-		fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
-	}
-	tw.Flush()
-	buf.WriteString(configHelp)
-	cmdSummary = buf.String()
-}
-
-var configHelp = `
-For convenience, values of the -project, -instance, -creds,
--admin-endpoint and -data-endpoint flags may be specified in
-` + cbtconfig.Filename() + ` in this format:
-	project = my-project-123
-	instance = my-instance
-	creds = path-to-account-key.json
-	admin-endpoint = hostname:port
-	data-endpoint = hostname:port
-All values are optional, and all will be overridden by flags.
-
-cbt ` + version + ` ` + revision + ` ` + revisionDate + `
-`
-
-var commands = []struct {
-	Name, Desc string
-	do         func(context.Context, ...string)
-	Usage      string
-	Required   cbtconfig.RequiredFlags
-}{
-	{
-		Name:     "count",
-		Desc:     "Count rows in a table",
-		do:       doCount,
-		Usage:    "cbt count <table>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "createfamily",
-		Desc:     "Create a column family",
-		do:       doCreateFamily,
-		Usage:    "cbt createfamily <table> <family>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name: "createtable",
-		Desc: "Create a table",
-		do:   doCreateTable,
-		Usage: "cbt createtable <table> [initial_splits...]\n" +
-			"  initial_splits=row		A row key to be used to initially split the table " +
-			"into multiple tablets. Can be repeated to create multiple splits.",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "deletecolumn",
-		Desc:     "Delete all cells in a column",
-		do:       doDeleteColumn,
-		Usage:    "cbt deletecolumn <table> <row> <family> <column>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "deletefamily",
-		Desc:     "Delete a column family",
-		do:       doDeleteFamily,
-		Usage:    "cbt deletefamily <table> <family>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "deleterow",
-		Desc:     "Delete a row",
-		do:       doDeleteRow,
-		Usage:    "cbt deleterow <table> <row>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "deletetable",
-		Desc:     "Delete a table",
-		do:       doDeleteTable,
-		Usage:    "cbt deletetable <table>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "doc",
-		Desc:     "Print godoc-suitable documentation for cbt",
-		do:       doDoc,
-		Usage:    "cbt doc",
-		Required: cbtconfig.NoneRequired,
-	},
-	{
-		Name:     "help",
-		Desc:     "Print help text",
-		do:       doHelp,
-		Usage:    "cbt help [command]",
-		Required: cbtconfig.NoneRequired,
-	},
-	{
-		Name:     "listinstances",
-		Desc:     "List instances in a project",
-		do:       doListInstances,
-		Usage:    "cbt listinstances",
-		Required: cbtconfig.ProjectRequired,
-	},
-	{
-		Name:     "lookup",
-		Desc:     "Read from a single row",
-		do:       doLookup,
-		Usage:    "cbt lookup <table> <row>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name: "ls",
-		Desc: "List tables and column families",
-		do:   doLS,
-		Usage: "cbt ls			List tables\n" +
-			"cbt ls <table>		List column families in <table>",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "mddoc",
-		Desc:     "Print documentation for cbt in Markdown format",
-		do:       doMDDoc,
-		Usage:    "cbt mddoc",
-		Required: cbtconfig.NoneRequired,
-	},
-	{
-		Name: "read",
-		Desc: "Read rows",
-		do:   doRead,
-		Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
-			" [regex=<regex>] [count=<n>]\n" +
-			"  start=<row>		Start reading at this row\n" +
-			"  end=<row>		Stop reading before this row\n" +
-			"  prefix=<prefix>	Read rows with this prefix\n" +
-			"  regex=<regex> 	Read rows with keys matching this regex\n" +
-			"  count=<n>		Read only this many rows\n",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name: "set",
-		Desc: "Set value of a cell",
-		do:   doSet,
-		Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
-			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
-			"\n" +
-			"  ts is an optional integer timestamp.\n" +
-			"  If it cannot be parsed, the `@ts` part will be\n" +
-			"  interpreted as part of the value.",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name: "setgcpolicy",
-		Desc: "Set the GC policy for a column family",
-		do:   doSetGCPolicy,
-		Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
-			"\n" +
-			`  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
-			"  maxversions=<n>	Maximum number of versions to preserve",
-		Required: cbtconfig.ProjectAndInstanceRequired,
-	},
-	{
-		Name:     "version",
-		Desc:     "Print the current cbt version",
-		do:       doVersion,
-		Usage:    "cbt version",
-		Required: cbtconfig.NoneRequired,
-	},
-}
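
Each entry in this table binds a command name to its handler and to the flags it needs; the tool's main function (removed earlier in this file's diff) dispatches on the first positional argument. A rough sketch of that dispatch, assuming a *cbtconfig.Config named config as main sets one up:

	for _, cmd := range commands {
		if cmd.Name == flag.Arg(0) {
			if err := config.CheckFlags(cmd.Required); err != nil {
				log.Fatal(err)
			}
			cmd.do(context.Background(), flag.Args()[1:]...)
			return
		}
	}
	log.Fatalf("Unknown command %q", flag.Arg(0))
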
-
-func doCount(ctx context.Context, args ...string) {
-	if len(args) != 1 {
-		log.Fatal("usage: cbt count <table>")
-	}
-	tbl := getClient().Open(args[0])
-
-	n := 0
-	err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
-		n++
-		return true
-	}, bigtable.RowFilter(bigtable.StripValueFilter()))
-	if err != nil {
-		log.Fatalf("Reading rows: %v", err)
-	}
-	fmt.Println(n)
-}
-
-func doCreateFamily(ctx context.Context, args ...string) {
-	if len(args) != 2 {
-		log.Fatal("usage: cbt createfamily <table> <family>")
-	}
-	err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
-	if err != nil {
-		log.Fatalf("Creating column family: %v", err)
-	}
-}
-
-func doCreateTable(ctx context.Context, args ...string) {
-	if len(args) < 1 {
-		log.Fatal("usage: cbt createtable <table> [initial_splits...]")
-	}
-	var err error
-	if len(args) > 1 {
-		splits := args[1:]
-		err = getAdminClient().CreatePresplitTable(ctx, args[0], splits)
-	} else {
-		err = getAdminClient().CreateTable(ctx, args[0])
-	}
-	if err != nil {
-		log.Fatalf("Creating table: %v", err)
-	}
-}
-
-func doDeleteColumn(ctx context.Context, args ...string) {
-	if len(args) != 4 {
-		log.Fatal("usage: cbt deletecolumn <table> <row> <family> <column>")
-	}
-	tbl := getClient().Open(args[0])
-	mut := bigtable.NewMutation()
-	mut.DeleteCellsInColumn(args[2], args[3])
-	if err := tbl.Apply(ctx, args[1], mut); err != nil {
-		log.Fatalf("Deleting cells in column: %v", err)
-	}
-}
-
-func doDeleteFamily(ctx context.Context, args ...string) {
-	if len(args) != 2 {
-		log.Fatal("usage: cbt deletefamily <table> <family>")
-	}
-	err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
-	if err != nil {
-		log.Fatalf("Deleting column family: %v", err)
-	}
-}
-
-func doDeleteRow(ctx context.Context, args ...string) {
-	if len(args) != 2 {
-		log.Fatal("usage: cbt deleterow <table> <row>")
-	}
-	tbl := getClient().Open(args[0])
-	mut := bigtable.NewMutation()
-	mut.DeleteRow()
-	if err := tbl.Apply(ctx, args[1], mut); err != nil {
-		log.Fatalf("Deleting row: %v", err)
-	}
-}
-
-func doDeleteTable(ctx context.Context, args ...string) {
-	if len(args) != 1 {
-		log.Fatalf("Can't do `cbt deletetable %s`", args)
-	}
-	err := getAdminClient().DeleteTable(ctx, args[0])
-	if err != nil {
-		log.Fatalf("Deleting table: %v", err)
-	}
-}
-
-// to break circular dependencies
-var (
-	doDocFn   func(ctx context.Context, args ...string)
-	doHelpFn  func(ctx context.Context, args ...string)
-	doMDDocFn func(ctx context.Context, args ...string)
-)
-
-func init() {
-	doDocFn = doDocReal
-	doHelpFn = doHelpReal
-	doMDDocFn = doMDDocReal
-}
-
-func doDoc(ctx context.Context, args ...string)   { doDocFn(ctx, args...) }
-func doHelp(ctx context.Context, args ...string)  { doHelpFn(ctx, args...) }
-func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) }
-
-func docFlags() []*flag.Flag {
-	// Only include specific flags, in a specific order.
-	var flags []*flag.Flag
-	for _, name := range []string{"project", "instance", "creds"} {
-		f := flag.Lookup(name)
-		if f == nil {
-			log.Fatalf("Flag not linked: -%s", name)
-		}
-		flags = append(flags, f)
-	}
-	return flags
-}
-
-func doDocReal(ctx context.Context, args ...string) {
-	data := map[string]interface{}{
-		"Commands": commands,
-		"Flags":    docFlags(),
-	}
-	var buf bytes.Buffer
-	if err := docTemplate.Execute(&buf, data); err != nil {
-		log.Fatalf("Bad doc template: %v", err)
-	}
-	out, err := format.Source(buf.Bytes())
-	if err != nil {
-		log.Fatalf("Bad doc output: %v", err)
-	}
-	os.Stdout.Write(out)
-}
-
-func indentLines(s, ind string) string {
-	ss := strings.Split(s, "\n")
-	for i, p := range ss {
-		ss[i] = ind + p
-	}
-	return strings.Join(ss, "\n")
-}
-
-var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
-	"indent": indentLines,
-}).
-	Parse(`
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
-// Run "go generate" to regenerate.
-//go:generate go run cbt.go -o cbtdoc.go doc
-
-/*
-Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
-install the cbt tool, see the
-[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
-
-Usage:
-
-	cbt [options] command [arguments]
-
-The commands are:
-{{range .Commands}}
-	{{printf "%-25s %s" .Name .Desc}}{{end}}
-
-Use "cbt help <command>" for more information about a command.
-
-The options are:
-{{range .Flags}}
-	-{{.Name}} string
-		{{.Usage}}{{end}}
-
-{{range .Commands}}
-{{.Desc}}
-
-Usage:
-{{indent .Usage "\t"}}
-
-
-
-{{end}}
-*/
-package main
-`))
-
-func doHelpReal(ctx context.Context, args ...string) {
-	if len(args) == 0 {
-		usage(os.Stdout)
-		return
-	}
-	for _, cmd := range commands {
-		if cmd.Name == args[0] {
-			fmt.Println(cmd.Usage)
-			return
-		}
-	}
-	log.Fatalf("Don't know command %q", args[0])
-}
-
-func doListInstances(ctx context.Context, args ...string) {
-	if len(args) != 0 {
-		log.Fatalf("usage: cbt listinstances")
-	}
-	is, err := getInstanceAdminClient().Instances(ctx)
-	if err != nil {
-		log.Fatalf("Getting list of instances: %v", err)
-	}
-	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
-	fmt.Fprintf(tw, "Instance Name\tInfo\n")
-	fmt.Fprintf(tw, "-------------\t----\n")
-	for _, i := range is {
-		fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
-	}
-	tw.Flush()
-}
-
-func doLookup(ctx context.Context, args ...string) {
-	if len(args) != 2 {
-		log.Fatalf("usage: cbt lookup <table> <row>")
-	}
-	table, row := args[0], args[1]
-	tbl := getClient().Open(table)
-	r, err := tbl.ReadRow(ctx, row)
-	if err != nil {
-		log.Fatalf("Reading row: %v", err)
-	}
-	printRow(r)
-}
-
-func printRow(r bigtable.Row) {
-	fmt.Println(strings.Repeat("-", 40))
-	fmt.Println(r.Key())
-
-	var fams []string
-	for fam := range r {
-		fams = append(fams, fam)
-	}
-	sort.Strings(fams)
-	for _, fam := range fams {
-		ris := r[fam]
-		sort.Sort(byColumn(ris))
-		for _, ri := range ris {
-			ts := time.Unix(0, int64(ri.Timestamp)*1e3)
-			fmt.Printf("  %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
-			fmt.Printf("    %q\n", ri.Value)
-		}
-	}
-}
-
-type byColumn []bigtable.ReadItem
-
-func (b byColumn) Len() int           { return len(b) }
-func (b byColumn) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }
-
-type byFamilyName []bigtable.FamilyInfo
-
-func (b byFamilyName) Len() int           { return len(b) }
-func (b byFamilyName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byFamilyName) Less(i, j int) bool { return b[i].Name < b[j].Name }
-
-func doLS(ctx context.Context, args ...string) {
-	switch len(args) {
-	default:
-		log.Fatalf("Can't do `cbt ls %s`", args)
-	case 0:
-		tables, err := getAdminClient().Tables(ctx)
-		if err != nil {
-			log.Fatalf("Getting list of tables: %v", err)
-		}
-		sort.Strings(tables)
-		for _, table := range tables {
-			fmt.Println(table)
-		}
-	case 1:
-		table := args[0]
-		ti, err := getAdminClient().TableInfo(ctx, table)
-		if err != nil {
-			log.Fatalf("Getting table info: %v", err)
-		}
-		sort.Sort(byFamilyName(ti.FamilyInfos))
-		tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
-		fmt.Fprintf(tw, "Family Name\tGC Policy\n")
-		fmt.Fprintf(tw, "-----------\t---------\n")
-		for _, fam := range ti.FamilyInfos {
-			fmt.Fprintf(tw, "%s\t%s\n", fam.Name, fam.GCPolicy)
-		}
-		tw.Flush()
-	}
-}
-
-func doMDDocReal(ctx context.Context, args ...string) {
-	data := map[string]interface{}{
-		"Commands": commands,
-		"Flags":    docFlags(),
-	}
-	var buf bytes.Buffer
-	if err := mddocTemplate.Execute(&buf, data); err != nil {
-		log.Fatalf("Bad mddoc template: %v", err)
-	}
-	io.Copy(os.Stdout, &buf)
-}
-
-var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
-	"indent": indentLines,
-}).
-	Parse(`
-Cbt is a tool for doing basic interactions with Cloud Bigtable.
-
-Usage:
-
-	cbt [options] command [arguments]
-
-The commands are:
-{{range .Commands}}
-	{{printf "%-25s %s" .Name .Desc}}{{end}}
-
-Use "cbt help <command>" for more information about a command.
-
-The options are:
-{{range .Flags}}
-	-{{.Name}} string
-		{{.Usage}}{{end}}
-
-{{range .Commands}}
-## {{.Desc}}
-
-{{indent .Usage "\t"}}
-
-
-
-{{end}}
-`))
-
-func doRead(ctx context.Context, args ...string) {
-	if len(args) < 1 {
-		log.Fatalf("usage: cbt read <table> [args ...]")
-	}
-	tbl := getClient().Open(args[0])
-
-	parsed := make(map[string]string)
-	for _, arg := range args[1:] {
-		i := strings.Index(arg, "=")
-		if i < 0 {
-			log.Fatalf("Bad arg %q", arg)
-		}
-		key, val := arg[:i], arg[i+1:]
-		switch key {
-		default:
-			log.Fatalf("Unknown arg key %q", key)
-		case "limit":
-			// Be nicer; we used to support this, but renamed it to "end".
-			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
-		case "start", "end", "prefix", "count", "regex":
-			parsed[key] = val
-		}
-	}
-	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
-		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
-	}
-
-	var rr bigtable.RowRange
-	if start, end := parsed["start"], parsed["end"]; end != "" {
-		rr = bigtable.NewRange(start, end)
-	} else if start != "" {
-		rr = bigtable.InfiniteRange(start)
-	}
-	if prefix := parsed["prefix"]; prefix != "" {
-		rr = bigtable.PrefixRange(prefix)
-	}
-
-	var opts []bigtable.ReadOption
-	if count := parsed["count"]; count != "" {
-		n, err := strconv.ParseInt(count, 0, 64)
-		if err != nil {
-			log.Fatalf("Bad count %q: %v", count, err)
-		}
-		opts = append(opts, bigtable.LimitRows(n))
-	}
-	if regex := parsed["regex"]; regex != "" {
-		opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
-	}
-
-	// TODO(dsymonds): Support filters.
-	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
-		printRow(r)
-		return true
-	}, opts...)
-	if err != nil {
-		log.Fatalf("Reading rows: %v", err)
-	}
-}
-
-var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)
-
-func doSet(ctx context.Context, args ...string) {
-	if len(args) < 3 {
-		log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
-	}
-	tbl := getClient().Open(args[0])
-	row := args[1]
-	mut := bigtable.NewMutation()
-	for _, arg := range args[2:] {
-		m := setArg.FindStringSubmatch(arg)
-		if m == nil {
-			log.Fatalf("Bad set arg %q", arg)
-		}
-		val := m[3]
-		ts := bigtable.Now()
-		if i := strings.LastIndex(val, "@"); i >= 0 {
-			// Try parsing a timestamp.
-			n, err := strconv.ParseInt(val[i+1:], 0, 64)
-			if err == nil {
-				val = val[:i]
-				ts = bigtable.Timestamp(n)
-			}
-		}
-		mut.Set(m[1], m[2], ts, []byte(val))
-	}
-	if err := tbl.Apply(ctx, row, mut); err != nil {
-		log.Fatalf("Applying mutation: %v", err)
-	}
-}
-
-func doSetGCPolicy(ctx context.Context, args ...string) {
-	if len(args) < 3 {
-		log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
-	}
-	table := args[0]
-	fam := args[1]
-
-	var pol bigtable.GCPolicy
-	switch p := args[2]; {
-	case strings.HasPrefix(p, "maxage="):
-		d, err := parseDuration(p[7:])
-		if err != nil {
-			log.Fatal(err)
-		}
-		pol = bigtable.MaxAgePolicy(d)
-	case strings.HasPrefix(p, "maxversions="):
-		n, err := strconv.ParseUint(p[12:], 10, 16)
-		if err != nil {
-			log.Fatal(err)
-		}
-		pol = bigtable.MaxVersionsPolicy(int(n))
-	default:
-		log.Fatalf("Bad GC policy %q", p)
-	}
-	if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
-		log.Fatalf("Setting GC policy: %v", err)
-	}
-}
-
-// parseDuration parses a duration string.
-// It is similar to Go's time.ParseDuration, except with a different set of supported units,
-// and only simple formats supported.
-func parseDuration(s string) (time.Duration, error) {
-	// [0-9]+[a-z]+
-
-	// Split [0-9]+ from [a-z]+.
-	i := 0
-	for ; i < len(s); i++ {
-		c := s[i]
-		if c < '0' || c > '9' {
-			break
-		}
-	}
-	ds, u := s[:i], s[i:]
-	if ds == "" || u == "" {
-		return 0, fmt.Errorf("invalid duration %q", s)
-	}
-	// Parse them.
-	d, err := strconv.ParseUint(ds, 10, 32)
-	if err != nil {
-		return 0, fmt.Errorf("invalid duration %q: %v", s, err)
-	}
-	unit, ok := unitMap[u]
-	if !ok {
-		return 0, fmt.Errorf("unknown unit %q in duration %q", u, s)
-	}
-	if d > uint64((1<<63-1)/unit) {
-		// overflow
-		return 0, fmt.Errorf("invalid duration %q overflows", s)
-	}
-	return time.Duration(d) * unit, nil
-}
-
-var unitMap = map[string]time.Duration{
-	"ms": time.Millisecond,
-	"s":  time.Second,
-	"m":  time.Minute,
-	"h":  time.Hour,
-	"d":  24 * time.Hour,
-}
-
-func doVersion(ctx context.Context, args ...string) {
-	fmt.Printf("%s %s %s\n", version, revision, revisionDate)
-}
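
Note that parseDuration above accepts exactly one number-unit pair drawn from unitMap, so compound forms that time.ParseDuration would allow are rejected. A quick sketch of the behavior:

	d, err := parseDuration("4d") // 4 * 24h
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(d) // 96h0m0s

	_, err = parseDuration("1h30m") // error: unknown unit "h30m" in duration "1h30m"
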

+ 0 - 213
vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go

@@ -1,213 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
-// Run "go generate" to regenerate.
-//go:generate go run cbt.go -o cbtdoc.go doc
-
-/*
-Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
-install the cbt tool, see the
-[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).
-
-Usage:
-
-	cbt [options] command [arguments]
-
-The commands are:
-
-	count                     Count rows in a table
-	createfamily              Create a column family
-	createtable               Create a table
-	deletecolumn              Delete all cells in a column
-	deletefamily              Delete a column family
-	deleterow                 Delete a row
-	deletetable               Delete a table
-	doc                       Print godoc-suitable documentation for cbt
-	help                      Print help text
-	listinstances             List instances in a project
-	lookup                    Read from a single row
-	ls                        List tables and column families
-	mddoc                     Print documentation for cbt in Markdown format
-	read                      Read rows
-	set                       Set value of a cell
-	setgcpolicy               Set the GC policy for a column family
-	version                   Print the current cbt version
-
-Use "cbt help <command>" for more information about a command.
-
-The options are:
-
-	-project string
-		project ID, if unset uses gcloud configured project
-	-instance string
-		Cloud Bigtable instance
-	-creds string
-		if set, use application credentials in this file
-
-
-Count rows in a table
-
-Usage:
-	cbt count <table>
-
-
-
-
-Create a column family
-
-Usage:
-	cbt createfamily <table> <family>
-
-
-
-
-Create a table
-
-Usage:
-	cbt createtable <table> [initial_splits...]
-	  initial_splits=row		A row key to be used to initially split the table into multiple tablets. Can be repeated to create multiple splits.
-
-
-
-
-Delete all cells in a column
-
-Usage:
-	cbt deletecolumn <table> <row> <family> <column>
-
-
-
-
-Delete a column family
-
-Usage:
-	cbt deletefamily <table> <family>
-
-
-
-
-Delete a row
-
-Usage:
-	cbt deleterow <table> <row>
-
-
-
-
-Delete a table
-
-Usage:
-	cbt deletetable <table>
-
-
-
-
-Print godoc-suitable documentation for cbt
-
-Usage:
-	cbt doc
-
-
-
-
-Print help text
-
-Usage:
-	cbt help [command]
-
-
-
-
-List instances in a project
-
-Usage:
-	cbt listinstances
-
-
-
-
-Read from a single row
-
-Usage:
-	cbt lookup <table> <row>
-
-
-
-
-List tables and column families
-
-Usage:
-	cbt ls			List tables
-	cbt ls <table>		List column families in <table>
-
-
-
-
-Print documentation for cbt in Markdown format
-
-Usage:
-	cbt mddoc
-
-
-
-
-Read rows
-
-Usage:
-	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>]
-	  start=<row>		Start reading at this row
-	  end=<row>		Stop reading before this row
-	  prefix=<prefix>	Read rows with this prefix
-	  regex=<regex> 	Read rows with keys matching this regex
-	  count=<n>		Read only this many rows
-
-
-
-
-
-Set value of a cell
-
-Usage:
-	cbt set <table> <row> family:column=val[@ts] ...
-	  family:column=val[@ts] may be repeated to set multiple cells.
-
-	  ts is an optional integer timestamp.
-	  If it cannot be parsed, the `@ts` part will be
-	  interpreted as part of the value.
-
-
-
-
-Set the GC policy for a column family
-
-Usage:
-	cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )
-
-	  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")
-	  maxversions=<n>	Maximum number of versions to preserve
-
-
-
-
-Print the current cbt version
-
-Usage:
-	cbt version
-
-
-
-
-*/
-package main

+ 0 - 44
vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go

@@ -1,44 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-cbtemulator launches the in-memory Cloud Bigtable server on the given address.
-*/
-package main
-
-import (
-	"flag"
-	"fmt"
-	"log"
-
-	"cloud.google.com/go/bigtable/bttest"
-	"google.golang.org/grpc"
-)
-
-var (
-	host = flag.String("host", "localhost", "the address to bind to on the local machine")
-	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
-)
-
-func main() {
-	grpc.EnableTracing = false
-	flag.Parse()
-	srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port))
-	if err != nil {
-		log.Fatalf("failed to start emulator: %v", err)
-	}
-
-	fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
-	select {}
-}
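
A test can also talk to this in-memory server directly instead of launching the binary. A minimal sketch, assuming google.golang.org/api/option's WithGRPCConn and an insecure local dial ("proj" and "instance" are placeholder names; the emulator holds no real data, so any pair is accepted):

	srv, err := bttest.NewServer("localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	client, err := bigtable.NewClient(context.Background(), "proj", "instance",
		option.WithGRPCConn(conn))
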

+ 0 - 204
vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go

@@ -1,204 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Loadtest does some load testing through the Go client library for Cloud Bigtable.
-*/
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"log"
-	"math/rand"
-	"os"
-	"os/signal"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"cloud.google.com/go/bigtable"
-	"cloud.google.com/go/bigtable/internal/cbtconfig"
-	"cloud.google.com/go/bigtable/internal/stat"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/grpc"
-)
-
-var (
-	runFor = flag.Duration("run_for", 5*time.Second,
-		"how long to run the load test for; 0 to run forever until SIGTERM")
-	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
-	csvOutput    = flag.String("csv_output", "",
-		"output path for statistics in .csv format. If this file already exists it will be overwritten.")
-	poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
-	reqCount = flag.Int("req_count", 100, "number of concurrent requests")
-
-	config      *cbtconfig.Config
-	client      *bigtable.Client
-	adminClient *bigtable.AdminClient
-)
-
-func main() {
-	var err error
-	config, err = cbtconfig.Load()
-	if err != nil {
-		log.Fatal(err)
-	}
-	config.RegisterFlags()
-
-	flag.Parse()
-	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
-		log.Fatal(err)
-	}
-	if config.Creds != "" {
-		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
-	}
-	if flag.NArg() != 0 {
-		flag.Usage()
-		os.Exit(1)
-	}
-
-	var options []option.ClientOption
-	if *poolSize > 1 {
-		options = append(options,
-			option.WithGRPCConnectionPool(*poolSize),
-
-			// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
-			// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
-			option.WithGRPCDialOption(grpc.WithBlock()))
-	}
-
-	var csvFile *os.File
-	if *csvOutput != "" {
-		csvFile, err = os.Create(*csvOutput)
-		if err != nil {
-			log.Fatalf("creating csv output file: %v", err)
-		}
-		defer csvFile.Close()
-		log.Printf("Writing statistics to %q ...", *csvOutput)
-	}
-
-	log.Printf("Dialing connections...")
-	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
-	if err != nil {
-		log.Fatalf("Making bigtable.Client: %v", err)
-	}
-	defer client.Close()
-	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
-	if err != nil {
-		log.Fatalf("Making bigtable.AdminClient: %v", err)
-	}
-	defer adminClient.Close()
-
-	// Create a scratch table.
-	log.Printf("Setting up scratch table...")
-	if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
-		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
-	}
-	if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
-		log.Fatalf("Making scratch table column family: %v", err)
-	}
-	// Upon a successful run, delete the table. Don't bother checking for errors.
-	defer adminClient.DeleteTable(context.Background(), *scratchTable)
-
-	// Also delete the table on SIGTERM.
-	c := make(chan os.Signal, 1)
-	signal.Notify(c, os.Interrupt)
-	go func() {
-		s := <-c
-		log.Printf("Caught %v, cleaning scratch table.", s)
-		adminClient.DeleteTable(context.Background(), *scratchTable)
-		os.Exit(1)
-	}()
-
-	log.Printf("Starting load test... (run for %v)", *runFor)
-	tbl := client.Open(*scratchTable)
-	sem := make(chan int, *reqCount) // limit the number of requests happening at once
-	var reads, writes stats
-	stopTime := time.Now().Add(*runFor)
-	var wg sync.WaitGroup
-	for time.Now().Before(stopTime) || *runFor == 0 {
-		sem <- 1
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			defer func() { <-sem }()
-
-			ok := true
-			opStart := time.Now()
-			var stats *stats
-			defer func() {
-				stats.Record(ok, time.Since(opStart))
-			}()
-
-			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows
-
-			switch rand.Intn(10) {
-			default:
-				// read
-				stats = &reads
-				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
-				if err != nil {
-					log.Printf("Error doing read: %v", err)
-					ok = false
-				}
-			case 0, 1, 2, 3, 4:
-				// write
-				stats = &writes
-				mut := bigtable.NewMutation()
-				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
-				if err := tbl.Apply(context.Background(), row, mut); err != nil {
-					log.Printf("Error doing mutation: %v", err)
-					ok = false
-				}
-			}
-		}()
-	}
-	wg.Wait()
-
-	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
-	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
-	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
-	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)
-
-	if csvFile != nil {
-		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
-	}
-}
-
-var allStats int64 // atomic
-
-type stats struct {
-	mu        sync.Mutex
-	tries, ok int
-	ds        []time.Duration
-}
-
-func (s *stats) Record(ok bool, d time.Duration) {
-	s.mu.Lock()
-	s.tries++
-	if ok {
-		s.ok++
-	}
-	s.ds = append(s.ds, d)
-	s.mu.Unlock()
-
-	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
-		log.Printf("Progress: done %d ops", n)
-	}
-}
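
The buffered channel sem above is Go's counting-semaphore idiom: the send blocks once *reqCount operations are in flight, and each goroutine frees its slot on exit. The core of the pattern, reduced to a sketch:

	sem := make(chan int, 10) // capacity = max concurrent operations
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		sem <- 1 // blocks while 10 operations are already running
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }() // free the slot
			// ... perform one operation ...
		}()
	}
	wg.Wait()
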

+ 0 - 155
vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go

@@ -1,155 +0,0 @@
-/*
-Copyright 2016 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Scantest does scan-related load testing against Cloud Bigtable. The logic here
-mimics a similar test written using the Java client.
-*/
-package main
-
-import (
-	"bytes"
-	"flag"
-	"fmt"
-	"log"
-	"math/rand"
-	"os"
-	"sync"
-	"sync/atomic"
-	"text/tabwriter"
-	"time"
-
-	"cloud.google.com/go/bigtable"
-	"cloud.google.com/go/bigtable/internal/cbtconfig"
-	"cloud.google.com/go/bigtable/internal/stat"
-	"golang.org/x/net/context"
-)
-
-var (
-	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
-	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
-	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")
-
-	config *cbtconfig.Config
-	client *bigtable.Client
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
-		flag.PrintDefaults()
-	}
-
-	var err error
-	config, err = cbtconfig.Load()
-	if err != nil {
-		log.Fatal(err)
-	}
-	config.RegisterFlags()
-
-	flag.Parse()
-	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
-		log.Fatal(err)
-	}
-	if config.Creds != "" {
-		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
-	}
-	if flag.NArg() != 1 {
-		flag.Usage()
-		os.Exit(1)
-	}
-
-	table := flag.Arg(0)
-
-	log.Printf("Dialing connections...")
-	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
-	if err != nil {
-		log.Fatalf("Making bigtable.Client: %v", err)
-	}
-	defer client.Close()
-
-	log.Printf("Starting scan test... (run for %v)", *runFor)
-	tbl := client.Open(table)
-	sem := make(chan int, *numScans) // limit the number of requests happening at once
-	var scans stats
-
-	stopTime := time.Now().Add(*runFor)
-	var wg sync.WaitGroup
-	for time.Now().Before(stopTime) {
-		sem <- 1
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			defer func() { <-sem }()
-
-			ok := true
-			opStart := time.Now()
-			defer func() {
-				scans.Record(ok, time.Since(opStart))
-			}()
-
-			// Start at a random row key
-			key := fmt.Sprintf("user%d", rand.Int63())
-			limit := bigtable.LimitRows(int64(*rowLimit))
-			noop := func(bigtable.Row) bool { return true }
-			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
-				log.Printf("Error during scan: %v", err)
-				ok = false
-			}
-		}()
-	}
-	wg.Wait()
-
-	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
-	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
-		scans.ok, scans.tries, agg, throughputString(agg))
-}
-
-func throughputString(agg *stat.Aggregate) string {
-	var buf bytes.Buffer
-	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
-	rowLimitF := float64(*rowLimit)
-	fmt.Fprintf(
-		tw,
-		"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
-		rowLimitF/agg.Max.Seconds(),
-		rowLimitF/agg.Median.Seconds(),
-		rowLimitF/agg.Min.Seconds())
-	tw.Flush()
-	return buf.String()
-}
-
-var allStats int64 // atomic
-
-type stats struct {
-	mu        sync.Mutex
-	tries, ok int
-	ds        []time.Duration
-}
-
-func (s *stats) Record(ok bool, d time.Duration) {
-	s.mu.Lock()
-	s.tries++
-	if ok {
-		s.ok++
-	}
-	s.ds = append(s.ds, d)
-	s.mu.Unlock()
-
-	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
-		log.Printf("Progress: done %d ops", n)
-	}
-}

+ 0 - 125
vendor/cloud.google.com/go/bigtable/doc.go

@@ -1,125 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-/*
-Package bigtable is an API to Google Cloud Bigtable.
-
-See https://cloud.google.com/bigtable/docs/ for general product documentation.
-
-Setup and Credentials
-
-Use NewClient or NewAdminClient to create a client that can be used to access
-the data or admin APIs respectively. Both require credentials that have permission
-to access the Cloud Bigtable API.
-
-If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
-(https://developers.google.com/accounts/docs/application-default-credentials)
-is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.
-
-To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
-For instance, you can use service account credentials by visiting
-https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
-creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
-	jsonKey, err := ioutil.ReadFile(pathToKeyFile)
-	...
-	config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
-	...
-	client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx)))
-	...
-Here, `google` means the golang.org/x/oauth2/google package
-and `option` means the google.golang.org/api/option package.
-
-Reading
-
-The principal way to read from a Bigtable is to use the ReadRows method on *Table.
-A RowRange specifies a contiguous portion of a table. A Filter may be provided through
-RowFilter to limit or transform the data that is returned.
-	tbl := client.Open("mytable")
-	...
-	// Read all the rows starting with "com.google.",
-	// but only fetch the columns in the "links" family.
-	rr := bigtable.PrefixRange("com.google.")
-	err := tbl.ReadRows(ctx, rr, func(r Row) bool {
-		// do something with r
-		return true // keep going
-	}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
-	...
-
-To read a single row, use the ReadRow helper method.
-	r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
-	...
-
-Writing
-
-This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
-The former expresses idempotent operations.
-The latter expresses non-idempotent operations and returns the new values of updated cells.
-These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
-building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
-methods on a Table.
-
-For instance, to set a couple of cells in a table,
-	tbl := client.Open("mytable")
-	mut := bigtable.NewMutation()
-	mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
-	mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
-	err := tbl.Apply(ctx, "com.google.cloud", mut)
-	...
-
-To increment an encoded value in one cell,
-	tbl := client.Open("mytable")
-	rmw := bigtable.NewReadModifyWrite()
-	rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
-	r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
-	...
-
-Retries
-
-If a read or write operation encounters a transient error it will be retried until a successful
-response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
-the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
-will not re-scan rows that have already been processed.
-
-Authentication
-
-See examples of authorization and authentication at
-https://godoc.org/cloud.google.com/go#pkg-examples.
-
-*/
-package bigtable // import "cloud.google.com/go/bigtable"
-
-// Scope constants for authentication credentials.
-// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
-const (
-	// Scope is the OAuth scope for Cloud Bigtable data operations.
-	Scope = "https://www.googleapis.com/auth/bigtable.data"
-	// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
-	ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"
-
-	// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
-	AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"
-
-	// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
-	InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
-)
-
-// clientUserAgent identifies the version of this package.
-// It should be bumped upon significant changes only.
-const clientUserAgent = "cbt-go/20160628"
-
-// resourcePrefixHeader is the name of the metadata header used to indicate
-// the resource being operated on.
-const resourcePrefixHeader = "google-cloud-resource-prefix"

+ 0 - 318
vendor/cloud.google.com/go/bigtable/filter.go

@@ -1,318 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package bigtable
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
-)
-
-// A Filter represents a row filter.
-type Filter interface {
-	String() string
-	proto() *btpb.RowFilter
-}
-
-// ChainFilters returns a filter that applies a sequence of filters.
-func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }
-
-type chainFilter struct {
-	sub []Filter
-}
-
-func (cf chainFilter) String() string {
-	var ss []string
-	for _, sf := range cf.sub {
-		ss = append(ss, sf.String())
-	}
-	return "(" + strings.Join(ss, " | ") + ")"
-}
-
-func (cf chainFilter) proto() *btpb.RowFilter {
-	chain := &btpb.RowFilter_Chain{}
-	for _, sf := range cf.sub {
-		chain.Filters = append(chain.Filters, sf.proto())
-	}
-	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_Chain_{chain},
-	}
-}
-
-// InterleaveFilters returns a filter that applies a set of filters in parallel
-// and interleaves the results.
-func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }
-
-type interleaveFilter struct {
-	sub []Filter
-}
-
-func (ilf interleaveFilter) String() string {
-	var ss []string
-	for _, sf := range ilf.sub {
-		ss = append(ss, sf.String())
-	}
-	return "(" + strings.Join(ss, " + ") + ")"
-}
-
-func (ilf interleaveFilter) proto() *btpb.RowFilter {
-	inter := &btpb.RowFilter_Interleave{}
-	for _, sf := range ilf.sub {
-		inter.Filters = append(inter.Filters, sf.proto())
-	}
-	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_Interleave_{inter},
-	}
-}
-
-// RowKeyFilter returns a filter that matches cells from rows whose
-// key matches the provided RE2 pattern.
-// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
-func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }
-
-type rowKeyFilter string
-
-func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }
-
-func (rkf rowKeyFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
-}
-
-// FamilyFilter returns a filter that matches cells whose family name
-// matches the provided RE2 pattern.
-// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
-func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }
-
-type familyFilter string
-
-func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }
-
-func (ff familyFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
-}
-
-// ColumnFilter returns a filter that matches cells whose column name
-// matches the provided RE2 pattern.
-// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
-func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }
-
-type columnFilter string
-
-func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }
-
-func (cf columnFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
-}
-
-// ValueFilter returns a filter that matches cells whose value
-// matches the provided RE2 pattern.
-// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
-func ValueFilter(pattern string) Filter { return valueFilter(pattern) }
-
-type valueFilter string
-
-func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }
-
-func (vf valueFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
-}
-
-// LatestNFilter returns a filter that matches the most recent N cells in each column.
-func LatestNFilter(n int) Filter { return latestNFilter(n) }
-
-type latestNFilter int32
-
-func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }
-
-func (lnf latestNFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
-}
-
-// StripValueFilter returns a filter that replaces each value with the empty string.
-func StripValueFilter() Filter { return stripValueFilter{} }
-
-type stripValueFilter struct{}
-
-func (stripValueFilter) String() string { return "strip_value()" }
-func (stripValueFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
-}
-
-// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds.  A zero
-// time means no bound.
-// The timestamp will be truncated to millisecond granularity.
-func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
-	trf := timestampRangeFilter{}
-	if !startTime.IsZero() {
-		trf.startTime = Time(startTime)
-	}
-	if !endTime.IsZero() {
-		trf.endTime = Time(endTime)
-	}
-	return trf
-}
-
-// TimestampRangeFilterMicros returns a filter that matches any cells whose timestamp is within the given time bounds,
-// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
-// The timestamp will be truncated to millisecond granularity.
-func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
-	return timestampRangeFilter{startTime, endTime}
-}
-
-type timestampRangeFilter struct {
-	startTime Timestamp
-	endTime   Timestamp
-}
-
-func (trf timestampRangeFilter) String() string {
-	return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime)
-}
-
-func (trf timestampRangeFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{
-		Filter: &btpb.RowFilter_TimestampRangeFilter{
-			&btpb.TimestampRange{
-				int64(trf.startTime.TruncateToMilliseconds()),
-				int64(trf.endTime.TruncateToMilliseconds()),
-			},
-		}}
-}
-
-// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single
-// family, as specified by an inclusive start qualifier and exclusive end qualifier.
-func ColumnRangeFilter(family, start, end string) Filter {
-	return columnRangeFilter{family, start, end}
-}
-
-type columnRangeFilter struct {
-	family string
-	start  string
-	end    string
-}
-
-func (crf columnRangeFilter) String() string {
-	return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end)
-}
-
-func (crf columnRangeFilter) proto() *btpb.RowFilter {
-	r := &btpb.ColumnRange{FamilyName: crf.family}
-	if crf.start != "" {
-		r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
-	}
-	if crf.end != "" {
-		r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
-	}
-	return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
-}
-
-// ValueRangeFilter returns a filter that matches cells with values that fall within
-// the given range, as specified by an inclusive start value and exclusive end value.
-func ValueRangeFilter(start, end []byte) Filter {
-	return valueRangeFilter{start, end}
-}
-
-type valueRangeFilter struct {
-	start []byte
-	end   []byte
-}
-
-func (vrf valueRangeFilter) String() string {
-	return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end)
-}
-
-func (vrf valueRangeFilter) proto() *btpb.RowFilter {
-	r := &btpb.ValueRange{}
-	if vrf.start != nil {
-		r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
-	}
-	if vrf.end != nil {
-		r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
-	}
-	return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
-}
-
-// ConditionFilter returns a filter that evaluates to one of two possible filters depending
-// on whether or not the given predicate filter matches at least one cell.
-// If the matched filter is nil then no results will be returned.
-// IMPORTANT NOTE: The predicate filter does not execute atomically with the
-// true and false filters, which may lead to inconsistent or unexpected
-// results. Additionally, condition filters have poor performance, especially
-// when filters are set for the false condition.
-func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
-	return conditionFilter{predicateFilter, trueFilter, falseFilter}
-}
-
-type conditionFilter struct {
-	predicateFilter Filter
-	trueFilter      Filter
-	falseFilter     Filter
-}
-
-func (cf conditionFilter) String() string {
-	return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter)
-}
-
-func (cf conditionFilter) proto() *btpb.RowFilter {
-	var tf *btpb.RowFilter
-	var ff *btpb.RowFilter
-	if cf.trueFilter != nil {
-		tf = cf.trueFilter.proto()
-	}
-	if cf.falseFilter != nil {
-		ff = cf.falseFilter.proto()
-	}
-	return &btpb.RowFilter{
-		&btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
-			cf.predicateFilter.proto(),
-			tf,
-			ff,
-		}}}
-}
-
-// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells.
-func CellsPerRowOffsetFilter(n int) Filter {
-	return cellsPerRowOffsetFilter(n)
-}
-
-type cellsPerRowOffsetFilter int32
-
-func (cof cellsPerRowOffsetFilter) String() string {
-	return fmt.Sprintf("cells_per_row_offset(%d)", cof)
-}
-
-func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}}
-}
-
-// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
-func CellsPerRowLimitFilter(n int) Filter {
-	return cellsPerRowLimitFilter(n)
-}
-
-type cellsPerRowLimitFilter int32
-
-func (clf cellsPerRowLimitFilter) String() string {
-	return fmt.Sprintf("cells_per_row_limit(%d)", clf)
-}
-
-func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
-	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}}
-}
-
-// TODO(dsymonds): More filters: sampling
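
These filters compose: ChainFilters pipes each sub-filter's output into the next, while InterleaveFilters runs its sub-filters in parallel and merges their results. For example, restricting a read to the newest cell of each column in the "links" family could look like this sketch (table and row key are placeholders):

	f := bigtable.ChainFilters(
		bigtable.FamilyFilter("links"),
		bigtable.LatestNFilter(1),
	)
	r, err := tbl.ReadRow(ctx, "com.google.cloud", bigtable.RowFilter(f))
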

+ 0 - 158
vendor/cloud.google.com/go/bigtable/gc.go

@@ -1,158 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package bigtable
-
-import (
-	"fmt"
-	"strings"
-	"time"
-
-	durpb "github.com/golang/protobuf/ptypes/duration"
-	bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
-)
-
-// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
-type GCPolicy interface {
-	String() string
-	proto() *bttdpb.GcRule
-}
-
-// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
-func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }
-
-type intersectionPolicy struct {
-	sub []GCPolicy
-}
-
-func (ip intersectionPolicy) String() string {
-	var ss []string
-	for _, sp := range ip.sub {
-		ss = append(ss, sp.String())
-	}
-	return "(" + strings.Join(ss, " && ") + ")"
-}
-
-func (ip intersectionPolicy) proto() *bttdpb.GcRule {
-	inter := &bttdpb.GcRule_Intersection{}
-	for _, sp := range ip.sub {
-		inter.Rules = append(inter.Rules, sp.proto())
-	}
-	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_Intersection_{inter},
-	}
-}
-
-// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
-func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }
-
-type unionPolicy struct {
-	sub []GCPolicy
-}
-
-func (up unionPolicy) String() string {
-	var ss []string
-	for _, sp := range up.sub {
-		ss = append(ss, sp.String())
-	}
-	return "(" + strings.Join(ss, " || ") + ")"
-}
-
-func (up unionPolicy) proto() *bttdpb.GcRule {
-	union := &bttdpb.GcRule_Union{}
-	for _, sp := range up.sub {
-		union.Rules = append(union.Rules, sp.proto())
-	}
-	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_Union_{union},
-	}
-}
-
-// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
-// except for the most recent n.
-func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }
-
-type maxVersionsPolicy int
-
-func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }
-
-func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
-	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
-}
-
-// MaxAgePolicy returns a GC policy that applies to all cells
-// older than the given age.
-func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }
-
-type maxAgePolicy time.Duration
-
-var units = []struct {
-	d      time.Duration
-	suffix string
-}{
-	{24 * time.Hour, "d"},
-	{time.Hour, "h"},
-	{time.Minute, "m"},
-}
-
-func (ma maxAgePolicy) String() string {
-	d := time.Duration(ma)
-	for _, u := range units {
-		if d%u.d == 0 {
-			return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
-		}
-	}
-	return fmt.Sprintf("age() > %d", d/time.Microsecond)
-}
-
-func (ma maxAgePolicy) proto() *bttdpb.GcRule {
-	// This doesn't handle overflows, etc.
-	// Fix this if people care about GC policies over 290 years.
-	ns := time.Duration(ma).Nanoseconds()
-	return &bttdpb.GcRule{
-		Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
-			Seconds: ns / 1e9,
-			Nanos:   int32(ns % 1e9),
-		}},
-	}
-}
-
-// GCRuleToString converts the given GcRule proto to a user-visible string.
-func GCRuleToString(rule *bttdpb.GcRule) string {
-	if rule == nil {
-		return "<default>"
-	}
-	switch r := rule.Rule.(type) {
-	case *bttdpb.GcRule_MaxNumVersions:
-		return MaxVersionsPolicy(int(r.MaxNumVersions)).String()
-	case *bttdpb.GcRule_MaxAge:
-		return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
-	case *bttdpb.GcRule_Intersection_:
-		return joinRules(r.Intersection.Rules, " && ")
-	case *bttdpb.GcRule_Union_:
-		return joinRules(r.Union.Rules, " || ")
-	default:
-		return ""
-	}
-}
-
-func joinRules(rules []*bttdpb.GcRule, sep string) string {
-	var chunks []string
-	for _, r := range rules {
-		chunks = append(chunks, GCRuleToString(r))
-	}
-	return "(" + strings.Join(chunks, sep) + ")"
-}
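
GC policies compose the same way. A union rule collects a cell when any sub-rule matches, so "keep at most three versions, and nothing older than a week" is a union of MaxVersionsPolicy and MaxAgePolicy; a sketch against a hypothetical table and family:

	pol := bigtable.UnionPolicy(
		bigtable.MaxAgePolicy(7*24*time.Hour),
		bigtable.MaxVersionsPolicy(3),
	)
	if err := adminClient.SetGCPolicy(ctx, "mytable", "links", pol); err != nil {
		log.Fatalf("Setting GC policy: %v", err)
	}
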

+ 0 - 246
vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go

@@ -1,246 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud.
-package cbtconfig
-
-import (
-	"bufio"
-	"bytes"
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strings"
-	"time"
-
-	"golang.org/x/oauth2"
-	"google.golang.org/grpc/credentials"
-)
-
-// Config represents a configuration.
-type Config struct {
-	Project, Instance string                           // required
-	Creds             string                           // optional
-	AdminEndpoint     string                           // optional
-	DataEndpoint      string                           // optional
-	CertFile          string                           // optional
-	TokenSource       oauth2.TokenSource               // derived
-	TLSCreds          credentials.TransportCredentials // derived
-}
-
-type RequiredFlags uint
-
-const NoneRequired RequiredFlags = 0
-const (
-	ProjectRequired RequiredFlags = 1 << iota
-	InstanceRequired
-)
-const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired
-
-// RegisterFlags registers a set of standard flags for this config.
-// It should be called before flag.Parse.
-func (c *Config) RegisterFlags() {
-	flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project")
-	flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
-	flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
-	flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint")
-	flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint")
-	flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file")
-}
-
-// CheckFlags checks that the required config values are set.
-func (c *Config) CheckFlags(required RequiredFlags) error {
-	var missing []string
-	if c.CertFile != "" {
-		b, err := ioutil.ReadFile(c.CertFile)
-		if err != nil {
-			return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err)
-		}
-
-		cp := x509.NewCertPool()
-		if !cp.AppendCertsFromPEM(b) {
-			return fmt.Errorf("Failed to append certificates from %s", c.CertFile)
-		}
-
-		c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp})
-	}
-	if required != NoneRequired {
-		c.SetFromGcloud()
-	}
-	if required&ProjectRequired != 0 && c.Project == "" {
-		missing = append(missing, "-project")
-	}
-	if required&InstanceRequired != 0 && c.Instance == "" {
-		missing = append(missing, "-instance")
-	}
-	if len(missing) > 0 {
-		return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
-	}
-	return nil
-}
-
-// Filename returns the filename consulted for standard configuration.
-func Filename() string {
-	// TODO(dsymonds): Might need tweaking for Windows.
-	return filepath.Join(os.Getenv("HOME"), ".cbtrc")
-}
-
-// Load loads a .cbtrc file.
-// If the file is not present, an empty config is returned.
-func Load() (*Config, error) {
-	filename := Filename()
-	data, err := ioutil.ReadFile(filename)
-	if err != nil {
-		// silent fail if the file isn't there
-		if os.IsNotExist(err) {
-			return &Config{}, nil
-		}
-		return nil, fmt.Errorf("Reading %s: %v", filename, err)
-	}
-	c := new(Config)
-	s := bufio.NewScanner(bytes.NewReader(data))
-	for s.Scan() {
-		line := s.Text()
-		i := strings.Index(line, "=")
-		if i < 0 {
-			return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
-		}
-		key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
-		switch key {
-		default:
-			return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
-		case "project":
-			c.Project = val
-		case "instance":
-			c.Instance = val
-		case "creds":
-			c.Creds = val
-		case "admin-endpoint":
-			c.AdminEndpoint = val
-		case "data-endpoint":
-			c.DataEndpoint = val
-		}
-	}
-	return c, s.Err()
-}
-
-type GcloudCredential struct {
-	AccessToken string    `json:"access_token"`
-	Expiry      time.Time `json:"token_expiry"`
-}
-
-func (cred *GcloudCredential) Token() *oauth2.Token {
-	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
-}
-
-type GcloudConfig struct {
-	Configuration struct {
-		Properties struct {
-			Core struct {
-				Project string `json:"project"`
-			} `json:"core"`
-		} `json:"properties"`
-	} `json:"configuration"`
-	Credential GcloudCredential `json:"credential"`
-}
-
-type GcloudCmdTokenSource struct {
-	Command string
-	Args    []string
-}
-
-// Token implements the oauth2.TokenSource interface
-func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) {
-	gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args)
-	if err != nil {
-		return nil, err
-	}
-	return gcloudConfig.Credential.Token(), nil
-}
-
-// LoadGcloudConfig retrieves the gcloud configuration values we need via the
-// 'config-helper' command
-func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) {
-	out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output()
-	if err != nil {
-		return nil, fmt.Errorf("Could not retrieve gcloud configuration")
-	}
-
-	var gcloudConfig GcloudConfig
-	if err := json.Unmarshal(out, &gcloudConfig); err != nil {
-		return nil, fmt.Errorf("Could not parse gcloud configuration")
-	}
-
-	return &gcloudConfig, nil
-}
-
-// SetFromGcloud retrieves and sets any missing config values from the gcloud
-// configuration, if possible.
-func (c *Config) SetFromGcloud() error {
-
-	if c.Creds == "" {
-		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
-		if c.Creds == "" {
-			log.Printf("-creds flag unset, will use gcloud credential")
-		}
-	} else {
-		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
-	}
-
-	if c.Project == "" {
-		log.Printf("-project flag unset, will use gcloud active project")
-	}
-
-	if c.Creds != "" && c.Project != "" {
-		return nil
-	}
-
-	gcloudCmd := "gcloud"
-	if runtime.GOOS == "windows" {
-		gcloudCmd = gcloudCmd + ".cmd"
-	}
-
-	gcloudCmdArgs := []string{"config", "config-helper",
-		"--format=json(configuration.properties.core.project,credential)"}
-
-	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
-	if err != nil {
-		return err
-	}
-
-	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
-		log.Printf("gcloud active project is \"%s\"",
-			gcloudConfig.Configuration.Properties.Core.Project)
-		c.Project = gcloudConfig.Configuration.Properties.Core.Project
-	}
-
-	if c.Creds == "" {
-		c.TokenSource = oauth2.ReuseTokenSource(
-			gcloudConfig.Credential.Token(),
-			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
-	}
-
-	return nil
-}

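For reference, the removed loader above reads a plain `key = value` file and falls back to gcloud for anything missing. A minimal usage sketch, assuming code inside the package (`RegisterFlags` is an assumed wrapper around the `flag.StringVar` calls at the top of the file):

```go
// Sketch: load ~/.cbtrc, layer flags on top, then validate.
// A hypothetical ~/.cbtrc accepted by Load:
//
//	project = my-project
//	instance = my-instance
//	creds = /path/to/key.json
//
cfg, err := Load()
if err != nil {
	log.Fatal(err)
}
cfg.RegisterFlags() // assumed: registers -project, -instance, etc.
flag.Parse()
if err := cfg.CheckFlags(ProjectRequired | InstanceRequired); err != nil {
	log.Fatal(err)
}
```
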
+ 0 - 106
vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go

@@ -1,106 +0,0 @@
-/*
-Copyright 2016 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
-package gax
-
-import (
-	"time"
-
-	"google.golang.org/grpc/codes"
-)
-
-// CallOption mutates a CallSettings when resolved.
-type CallOption interface {
-	Resolve(*CallSettings)
-}
-
-type callOptions []CallOption
-
-func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
-	for _, opt := range opts {
-		opt.Resolve(s)
-	}
-	return s
-}
-
-// CallSettings encapsulates the call settings for a particular API call.
-type CallSettings struct {
-	Timeout       time.Duration
-	RetrySettings RetrySettings
-}
-
-// RetrySettings are the per-call configurable settings for retrying upon transient failure.
-type RetrySettings struct {
-	RetryCodes      map[codes.Code]bool
-	BackoffSettings BackoffSettings
-}
-
-// BackoffSettings are the parameters to the exponential backoff algorithm for retrying.
-type BackoffSettings struct {
-	DelayTimeoutSettings MultipliableDuration
-	RPCTimeoutSettings   MultipliableDuration
-}
-
-type MultipliableDuration struct {
-	Initial    time.Duration
-	Max        time.Duration
-	Multiplier float64
-}
-
-func (w CallSettings) Resolve(s *CallSettings) {
-	s.Timeout = w.Timeout
-	s.RetrySettings = w.RetrySettings
-
-	s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
-	for key, value := range w.RetrySettings.RetryCodes {
-		s.RetrySettings.RetryCodes[key] = value
-	}
-}
-
-type withRetryCodes []codes.Code
-
-func (w withRetryCodes) Resolve(s *CallSettings) {
-	s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
-	for _, code := range w {
-		s.RetrySettings.RetryCodes[code] = true
-	}
-}
-
-// WithRetryCodes sets a list of Google API canonical error codes upon which a
-// retry should be attempted.
-func WithRetryCodes(retryCodes []codes.Code) CallOption {
-	return withRetryCodes(retryCodes)
-}
-
-type withDelayTimeoutSettings MultipliableDuration
-
-func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
-	s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
-}
-
-// WithDelayTimeoutSettings specifies:
-// - The initial delay time between the completion of the first failed
-//   request and the initiation of the first retrying request.
-// - The multiplier by which to increase the delay time between the
-//   completion of failed requests and the initiation of the subsequent
-//   retrying request.
-// - The maximum delay time between requests. When this value is reached,
-//   the multiplier will no longer be used to increase the delay time.
-func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
-	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
-}

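The file above is a trimmed gax snapshot: each `CallOption` mutates a `CallSettings` via `Resolve`. A small sketch of how the two exported options compose (codes and durations are illustrative):

```go
// Sketch: resolving options into a CallSettings value.
settings := &CallSettings{}
callOptions{
	WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
	WithDelayTimeoutSettings(100*time.Millisecond, 30*time.Second, 1.3),
}.Resolve(settings)
// settings.RetrySettings.RetryCodes now maps Unavailable and
// DeadlineExceeded to true; BackoffSettings.DelayTimeoutSettings holds
// {Initial: 100ms, Max: 30s, Multiplier: 1.3}.
```
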
+ 0 - 84
vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go

@@ -1,84 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
-package gax
-
-import (
-	"log"
-	"math/rand"
-	"os"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-var logger = log.New(os.Stderr, "", log.LstdFlags)
-
-// APICall is a user-defined call stub.
-type APICall func(context.Context) error
-
-// scaleDuration returns the product of a and mult.
-func scaleDuration(a time.Duration, mult float64) time.Duration {
-	ns := float64(a) * mult
-	return time.Duration(ns)
-}
-
-// invokeWithRetry calls stub using an exponential backoff retry mechanism
-// based on the values provided in callSettings.
-func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
-	retrySettings := callSettings.RetrySettings
-	backoffSettings := callSettings.RetrySettings.BackoffSettings
-	delay := backoffSettings.DelayTimeoutSettings.Initial
-	for {
-		// If the deadline is exceeded...
-		if ctx.Err() != nil {
-			return ctx.Err()
-		}
-		err := stub(ctx)
-		code := grpc.Code(err)
-		if code == codes.OK {
-			return nil
-		}
-
-		if !retrySettings.RetryCodes[code] {
-			return err
-		}
-
-		// Sleep a random amount up to the current delay, honoring
-		// cancellation of ctx.
-		d := time.Duration(rand.Int63n(int64(delay)))
-		delayCtx, cancel := context.WithTimeout(ctx, d)
-		logger.Printf("Retryable error: %v, retrying in %v", err, d)
-		<-delayCtx.Done()
-		cancel()
-
-		delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
-		if delay > backoffSettings.DelayTimeoutSettings.Max {
-			delay = backoffSettings.DelayTimeoutSettings.Max
-		}
-	}
-}
-
-// Invoke calls stub with a child of context modified by the specified options.
-func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
-	settings := &CallSettings{}
-	callOptions(opts).Resolve(settings)
-	if len(settings.RetrySettings.RetryCodes) > 0 {
-		return invokeWithRetry(ctx, stub, *settings)
-	}
-	return stub(ctx)
-}

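`Invoke` is the entry point: with no retry codes it calls the stub once; otherwise it loops with randomized exponential backoff. A hedged usage sketch (`client.SomeRPC` is a hypothetical gRPC call):

```go
// Sketch: wrapping an RPC in retry-on-Unavailable with capped backoff.
err := Invoke(ctx, func(ctx context.Context) error {
	_, err := client.SomeRPC(ctx, req)
	return err
},
	WithRetryCodes([]codes.Code{codes.Unavailable}),
	WithDelayTimeoutSettings(100*time.Millisecond, 30*time.Second, 1.3),
)
```

Each retryable failure sleeps a random slice of the current delay, then grows the delay by the 1.3 multiplier until the 30s cap is reached.
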
+ 0 - 48
vendor/cloud.google.com/go/bigtable/internal/option/option.go

@@ -1,48 +0,0 @@
-/*
-Copyright 2015 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package option contains common code for dealing with client options.
-package option
-
-import (
-	"fmt"
-	"os"
-
-	"google.golang.org/api/option"
-	"google.golang.org/grpc"
-)
-
-// DefaultClientOptions returns the default client options to use for the
-// client's gRPC connection.
-func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
-	var o []option.ClientOption
-	// Check the environment variables for the bigtable emulator.
-	// Dial it directly and don't pass any credentials.
-	if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
-		conn, err := grpc.Dial(addr, grpc.WithInsecure())
-		if err != nil {
-			return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
-		}
-		o = []option.ClientOption{option.WithGRPCConn(conn)}
-	} else {
-		o = []option.ClientOption{
-			option.WithEndpoint(endpoint),
-			option.WithScopes(scope),
-			option.WithUserAgent(userAgent),
-		}
-	}
-	return o, nil
-}

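The emulator branch above means a single environment variable redirects every client in the process. A sketch of how that plays out (endpoint, scope, and user-agent strings are illustrative):

```go
// Sketch: with the emulator variable set, credentials are skipped and
// the connection is dialed directly; otherwise normal options are built.
os.Setenv("BIGTABLE_EMULATOR_HOST", "localhost:8086")
opts, err := DefaultClientOptions(
	"bigtable.googleapis.com:443", // unused while the emulator is set
	"https://www.googleapis.com/auth/bigtable.data",
	"my-user-agent",
)
if err != nil {
	log.Fatal(err)
}
_ = opts // pass to the generated client constructors
```
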
+ 0 - 144
vendor/cloud.google.com/go/bigtable/internal/stat/stats.go

@@ -1,144 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stat
-
-import (
-	"bytes"
-	"encoding/csv"
-	"fmt"
-	"io"
-	"math"
-	"sort"
-	"strconv"
-	"text/tabwriter"
-	"time"
-)
-
-type byDuration []time.Duration
-
-func (data byDuration) Len() int           { return len(data) }
-func (data byDuration) Swap(i, j int)      { data[i], data[j] = data[j], data[i] }
-func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }
-
-// quantile returns a value representing the kth of q quantiles.
-// May alter the order of data.
-func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
-	if len(data) < 1 {
-		return 0, false
-	}
-	if k > q {
-		return 0, false
-	}
-	if k < 0 || q < 1 {
-		return 0, false
-	}
-
-	sort.Sort(byDuration(data))
-
-	if k == 0 {
-		return data[0], true
-	}
-	if k == q {
-		return data[len(data)-1], true
-	}
-
-	bucketSize := float64(len(data)-1) / float64(q)
-	i := float64(k) * bucketSize
-
-	lower := int(math.Trunc(i))
-	var upper int
-	if i > float64(lower) && lower+1 < len(data) {
-		// If the quantile lies between two elements
-		upper = lower + 1
-	} else {
-		upper = lower
-	}
-	weightUpper := i - float64(lower)
-	weightLower := 1 - weightUpper
-	return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
-}
-
-// Aggregate summarizes a latency distribution.
-type Aggregate struct {
-	Name               string
-	Count, Errors      int
-	Min, Median, Max   time.Duration
-	P75, P90, P95, P99 time.Duration // percentiles
-}
-
-// NewAggregate constructs an aggregate from latencies. It returns nil if latencies contains no aggregatable data.
-func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate {
-	agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount}
-
-	if len(latencies) == 0 {
-		return nil
-	}
-	var ok bool
-	if agg.Min, ok = quantile(latencies, 0, 2); !ok {
-		return nil
-	}
-	if agg.Median, ok = quantile(latencies, 1, 2); !ok {
-		return nil
-	}
-	if agg.Max, ok = quantile(latencies, 2, 2); !ok {
-		return nil
-	}
-	if agg.P75, ok = quantile(latencies, 75, 100); !ok {
-		return nil
-	}
-	if agg.P90, ok = quantile(latencies, 90, 100); !ok {
-		return nil
-	}
-	if agg.P95, ok = quantile(latencies, 95, 100); !ok {
-		return nil
-	}
-	if agg.P99, ok = quantile(latencies, 99, 100); !ok {
-		return nil
-	}
-	return &agg
-}
-
-func (agg *Aggregate) String() string {
-	if agg == nil {
-		return "no data"
-	}
-	var buf bytes.Buffer
-	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
-	fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
-		agg.Min, agg.Median, agg.Max, agg.P95, agg.P99)
-	tw.Flush()
-	return buf.String()
-}
-
-// WriteCSV writes a csv file to the given Writer,
-// with a header row and one row per aggregate.
-func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
-	w := csv.NewWriter(iow)
-	defer w.Flush()
-	err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
-	if err != nil {
-		return err
-	}
-	for _, agg := range aggs {
-		err = w.Write([]string{
-			agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors),
-			agg.Min.String(), agg.Median.String(), agg.Max.String(),
-			agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(),
-		})
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}

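`quantile` linearly interpolates between neighboring order statistics, so percentiles of small samples stay smooth: with the five sorted samples below, the P90 index is `90 * (5-1)/100 = 3.6`, giving `0.4*40ms + 0.6*50ms = 46ms`. A usage sketch (values are illustrative):

```go
// Sketch: aggregating five latencies and emitting CSV.
latencies := []time.Duration{
	10 * time.Millisecond, 20 * time.Millisecond, 30 * time.Millisecond,
	40 * time.Millisecond, 50 * time.Millisecond,
}
agg := NewAggregate("reads", latencies, 0)
fmt.Print(agg) // tab-aligned min/median/max/p95/p99 block
if err := WriteCSV([]*Aggregate{agg}, os.Stdout); err != nil {
	log.Fatal(err)
}
```
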
+ 0 - 250
vendor/cloud.google.com/go/bigtable/reader.go

@@ -1,250 +0,0 @@
-/*
-Copyright 2016 Google Inc. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package bigtable
-
-import (
-	"bytes"
-	"fmt"
-
-	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
-)
-
-// A Row is returned by ReadRows. The map is keyed by column family (the prefix
-// of the column name before the colon). The values are the returned ReadItems
-// for that column family in the order returned by Read.
-type Row map[string][]ReadItem
-
-// Key returns the row's key, or "" if the row is empty.
-func (r Row) Key() string {
-	for _, items := range r {
-		if len(items) > 0 {
-			return items[0].Row
-		}
-	}
-	return ""
-}
-
-// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
-type ReadItem struct {
-	Row, Column string
-	Timestamp   Timestamp
-	Value       []byte
-}
-
-// rrState is the current state of the read rows state machine.
-type rrState int64
-
-const (
-	newRow rrState = iota
-	rowInProgress
-	cellInProgress
-)
-
-// chunkReader handles cell chunks from the read rows response and combines
-// them into full Rows.
-type chunkReader struct {
-	state   rrState
-	curKey  []byte
-	curFam  string
-	curQual []byte
-	curTS   int64
-	curVal  []byte
-	curRow  Row
-	lastKey string
-}
-
-// newChunkReader returns a new chunkReader for handling read rows responses.
-func newChunkReader() *chunkReader {
-	return &chunkReader{state: newRow}
-}
-
-// Process takes a cell chunk and returns a new Row if the given chunk
-// completes a Row, or nil otherwise.
-func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
-	var row Row
-	switch cr.state {
-	case newRow:
-		if err := cr.validateNewRow(cc); err != nil {
-			return nil, err
-		}
-
-		cr.curRow = make(Row)
-		cr.curKey = cc.RowKey
-		cr.curFam = cc.FamilyName.Value
-		cr.curQual = cc.Qualifier.Value
-		cr.curTS = cc.TimestampMicros
-		row = cr.handleCellValue(cc)
-
-	case rowInProgress:
-		if err := cr.validateRowInProgress(cc); err != nil {
-			return nil, err
-		}
-
-		if cc.GetResetRow() {
-			cr.resetToNewRow()
-			return nil, nil
-		}
-
-		if cc.FamilyName != nil {
-			cr.curFam = cc.FamilyName.Value
-		}
-		if cc.Qualifier != nil {
-			cr.curQual = cc.Qualifier.Value
-		}
-		cr.curTS = cc.TimestampMicros
-		row = cr.handleCellValue(cc)
-
-	case cellInProgress:
-		if err := cr.validateCellInProgress(cc); err != nil {
-			return nil, err
-		}
-		if cc.GetResetRow() {
-			cr.resetToNewRow()
-			return nil, nil
-		}
-		row = cr.handleCellValue(cc)
-	}
-
-	return row, nil
-}
-
-// Close must be called after all cell chunks from the response
-// have been processed. An error will be returned if the reader is
-// in an invalid state, in which case the error should be propagated to the caller.
-func (cr *chunkReader) Close() error {
-	if cr.state != newRow {
-		return fmt.Errorf("invalid state for end of stream %d", cr.state)
-	}
-	return nil
-}
-
-// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
-func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
-	if cc.ValueSize > 0 {
-		// ValueSize is specified so expect a split value of ValueSize bytes
-		if cr.curVal == nil {
-			cr.curVal = make([]byte, 0, cc.ValueSize)
-		}
-		cr.curVal = append(cr.curVal, cc.Value...)
-		cr.state = cellInProgress
-	} else {
-		// This cell is either the complete value or the last chunk of a split
-		if cr.curVal == nil {
-			cr.curVal = cc.Value
-		} else {
-			cr.curVal = append(cr.curVal, cc.Value...)
-		}
-		cr.finishCell()
-
-		if cc.GetCommitRow() {
-			return cr.commitRow()
-		} else {
-			cr.state = rowInProgress
-		}
-	}
-
-	return nil
-}
-
-func (cr *chunkReader) finishCell() {
-	ri := ReadItem{
-		Row:       string(cr.curKey),
-		Column:    fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
-		Timestamp: Timestamp(cr.curTS),
-		Value:     cr.curVal,
-	}
-	cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri)
-	cr.curVal = nil
-}
-
-func (cr *chunkReader) commitRow() Row {
-	row := cr.curRow
-	cr.lastKey = cr.curRow.Key()
-	cr.resetToNewRow()
-	return row
-}
-
-func (cr *chunkReader) resetToNewRow() {
-	cr.curKey = nil
-	cr.curFam = ""
-	cr.curQual = nil
-	cr.curVal = nil
-	cr.curRow = nil
-	cr.curTS = 0
-	cr.state = newRow
-}
-
-func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error {
-	if cc.GetResetRow() {
-		return fmt.Errorf("reset_row not allowed between rows")
-	}
-	if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil {
-		return fmt.Errorf("missing key field for new row %v", cc)
-	}
-	if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) {
-		return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey))
-	}
-	return nil
-}
-
-func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
-	if err := cr.validateRowStatus(cc); err != nil {
-		return err
-	}
-	if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) {
-		return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey)
-	}
-	if cc.FamilyName != nil && cc.Qualifier == nil {
-		return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName.Value)
-	}
-	return nil
-}
-
-func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
-	if err := cr.validateRowStatus(cc); err != nil {
-		return err
-	}
-	if cr.curVal == nil {
-		return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc)
-	}
-	if !cc.GetResetRow() && cr.isAnyKeyPresent(cc) {
-		return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc)
-	}
-	return nil
-}
-
-func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool {
-	return cc.RowKey != nil ||
-		cc.FamilyName != nil ||
-		cc.Qualifier != nil ||
-		cc.TimestampMicros != 0
-}
-
-// Validate a RowStatus, commit or reset, if present.
-func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error {
-	// Resets can't be specified with any other part of a cell
-	if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) ||
-		cc.Value != nil ||
-		cc.ValueSize != 0 ||
-		cc.Labels != nil) {
-		return fmt.Errorf("reset must not be specified with other fields %v", cc)
-	}
-	if cc.GetCommitRow() && cc.ValueSize > 0 {
-		return fmt.Errorf("commit row found in between chunks in a cell")
-	}
-	return nil
-}

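The chunkReader above is a three-state machine (`newRow`, `rowInProgress`, `cellInProgress`); callers pump every CellChunk through `Process` and flush with `Close`. A driver sketch (`stream` and `handle` are hypothetical):

```go
// Sketch: assembling full rows from a ReadRows stream.
cr := newChunkReader()
for {
	res, err := stream.Recv() // e.g. a btpb.Bigtable_ReadRowsClient
	if err == io.EOF {
		break
	}
	if err != nil {
		return err
	}
	for _, cc := range res.Chunks {
		row, err := cr.Process(cc)
		if err != nil {
			return err // malformed chunk sequence
		}
		if row != nil {
			handle(row) // a row was committed
		}
	}
}
return cr.Close() // errors if the stream ended mid-row
```
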
+ 0 - 1178
vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json

@@ -1,1178 +0,0 @@
-{
-  "tests": [
-    {
-      "name": "invalid - no commit",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - no cell key before commit",
-      "chunks": [
-        "commit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - no cell key before value",
-      "chunks": [
-        "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - new col family must specify qualifier",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "family_name: \u003c\n  value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "bare commit implies ts=0",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "commit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "simple row with timestamp",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "missing timestamp, implied ts=0",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "empty cell value",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two unsplit cells",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two qualifiers",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "D",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two families",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "family_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "B",
-          "qual": "E",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "with labels",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "L_1",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "L_2",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "split cell, bare commit",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL\"\ncommit_row: false\n",
-        "commit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "split cell",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "split four ways",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"l\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"ue-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "L",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two split cells",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "multi-qualifier splits",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_1\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "D",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "multi-qualifier multi-split",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"lue-VAL_1\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"lue-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "D",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "multi-family split",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_1\"\ncommit_row: false\n",
-        "family_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "B",
-          "qual": "E",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "invalid - no commit between rows",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - no commit after first row",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - last row missing commit",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - duplicate row key",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n",
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - new row missing row key",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n",
-        "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "two rows",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows implicit timestamp",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows empty value",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows, one with multiple cells",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "B",
-          "qual": "D",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows, multiple cells",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "D",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "B",
-          "qual": "E",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "B",
-          "qual": "F",
-          "ts": 104,
-          "value": "value-VAL_4",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows, multiple cells, multiple families",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "family_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"M\"\n\u003e\nqualifier: \u003c\n  value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n",
-        "family_name: \u003c\n  value: \"N\"\n\u003e\nqualifier: \u003c\n  value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_1",
-          "fm": "B",
-          "qual": "E",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "M",
-          "qual": "O",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "N",
-          "qual": "P",
-          "ts": 104,
-          "value": "value-VAL_4",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows, four cells, 2 labels",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n",
-        "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 101,
-          "value": "value-VAL_1",
-          "label": "L_1",
-          "error": false
-        },
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 102,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "B",
-          "qual": "D",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "L_3",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "B",
-          "qual": "D",
-          "ts": 104,
-          "value": "value-VAL_4",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two rows with splits, same timestamp",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_1\"\ncommit_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"alue-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK_2",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "invalid - bare reset",
-      "chunks": [
-        "reset_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - bad reset, no commit",
-      "chunks": [
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - missing key after reset",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "no data after reset",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "reset_row: true\n"
-      ],
-      "results": null
-    },
-    {
-      "name": "simple reset",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset to new val",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset to new qual",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "D",
-          "ts": 100,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset with splits",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset two cells",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n",
-        "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "two resets",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset then two cells",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"B\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n",
-        "qualifier: \u003c\n  value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "B",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "B",
-          "qual": "D",
-          "ts": 103,
-          "value": "value-VAL_3",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset to new row",
-      "chunks": [
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK_2\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_2",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_2",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "reset in between chunks",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\ncommit_row: false\n",
-        "reset_row: true\n",
-        "row_key: \"RK_1\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK_1",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL_1",
-          "label": "",
-          "error": false
-        }
-      ]
-    },
-    {
-      "name": "invalid - reset with chunk",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\nreset_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "invalid - commit with chunk",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n",
-        "value: \"a\"\nvalue_size: 10\ncommit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "",
-          "fm": "",
-          "qual": "",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": true
-        }
-      ]
-    },
-    {
-      "name": "empty cell chunk",
-      "chunks": [
-        "row_key: \"RK\"\nfamily_name: \u003c\n  value: \"A\"\n\u003e\nqualifier: \u003c\n  value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n",
-        "commit_row: false\n",
-        "commit_row: true\n"
-      ],
-      "results": [
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 100,
-          "value": "value-VAL",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        },
-        {
-          "rk": "RK",
-          "fm": "A",
-          "qual": "C",
-          "ts": 0,
-          "value": "",
-          "label": "",
-          "error": false
-        }
-      ]
-    }
-  ]
-}

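The cases above exercise Bigtable's ReadRows chunk-merging rules: a reset_row chunk discards any partially accumulated row, and a chunk that combines reset_row (or a commit) with other payload must be rejected as invalid. As a rough illustration only — the chunk and row types below are simplified stand-ins, not the real ReadRowsResponse messages — a merger honoring those two rules might look like this:

package main

import (
	"errors"
	"fmt"
)

// chunk is a simplified stand-in for one ReadRows CellChunk.
type chunk struct {
	rowKey   string // set when a new row starts
	value    string
	resetRow bool
	commit   bool
}

// merge folds a chunk stream into committed rows, honoring the two rules the
// cases above test: reset_row discards the partial row, and reset_row
// combined with any other payload is an error.
func merge(chunks []chunk) (map[string][]string, error) {
	rows := make(map[string][]string)
	var key string
	var pending []string
	for _, c := range chunks {
		if c.resetRow {
			if c.rowKey != "" || c.value != "" || c.commit {
				return nil, errors.New("reset_row combined with other chunk data")
			}
			key, pending = "", nil // drop the partially read row
			continue
		}
		if c.rowKey != "" {
			key = c.rowKey
		}
		pending = append(pending, c.value)
		if c.commit {
			rows[key] = append(rows[key], pending...)
			pending = nil
		}
	}
	return rows, nil
}

func main() {
	// Mirrors the "reset to new row" case: only RK_2 survives.
	rows, err := merge([]chunk{
		{rowKey: "RK_1", value: "value-VAL_1"},
		{resetRow: true},
		{rowKey: "RK_2", value: "value-VAL_2", commit: true},
	})
	fmt.Println(rows, err) // map[RK_2:[value-VAL_2]] <nil>
}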
+ 0 - 277
vendor/cloud.google.com/go/civil/civil.go

@@ -1,277 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package civil implements types for civil time, a time-zone-independent
-// representation of time that follows the rules of the proleptic
-// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
-// minutes.
-//
-// Because they lack location information, these types do not represent unique
-// moments or intervals of time. Use time.Time for that purpose.
-package civil
-
-import (
-	"fmt"
-	"time"
-)
-
-// A Date represents a date (year, month, day).
-//
-// This type does not include location information, and therefore does not
-// describe a unique 24-hour timespan.
-type Date struct {
-	Year  int        // Year (e.g., 2014).
-	Month time.Month // Month of the year (January = 1, ...).
-	Day   int        // Day of the month, starting at 1.
-}
-
-// DateOf returns the Date in which a time occurs in that time's location.
-func DateOf(t time.Time) Date {
-	var d Date
-	d.Year, d.Month, d.Day = t.Date()
-	return d
-}
-
-// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents.
-func ParseDate(s string) (Date, error) {
-	t, err := time.Parse("2006-01-02", s)
-	if err != nil {
-		return Date{}, err
-	}
-	return DateOf(t), nil
-}
-
-// String returns the date in RFC3339 full-date format.
-func (d Date) String() string {
-	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
-}
-
-// IsValid reports whether the date is valid.
-func (d Date) IsValid() bool {
-	return DateOf(d.In(time.UTC)) == d
-}
-
-// In returns the time corresponding to time 00:00:00 of the date in the location.
-//
-// In is always consistent with time.Date, even when time.Date returns a time
-// on a different day. For example, if loc is America/Indiana/Vincennes, then both
-//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
-// and
-//     civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc)
-// return 23:00:00 on April 30, 1955.
-//
-// In panics if loc is nil.
-func (d Date) In(loc *time.Location) time.Time {
-	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
-}
-
-// AddDays returns the date that is n days in the future.
-// n can also be negative to go into the past.
-func (d Date) AddDays(n int) Date {
-	return DateOf(d.In(time.UTC).AddDate(0, 0, n))
-}
-
-// DaysSince returns the signed number of days between the date and s, not including the end day.
-// This is the inverse operation to AddDays.
-func (d Date) DaysSince(s Date) (days int) {
-	// We convert to Unix time so we do not have to worry about leap seconds:
-	// Unix time increases by exactly 86400 seconds per day.
-	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
-	return int(deltaUnix / 86400)
-}
-
-// Before reports whether d1 occurs before d2.
-func (d1 Date) Before(d2 Date) bool {
-	if d1.Year != d2.Year {
-		return d1.Year < d2.Year
-	}
-	if d1.Month != d2.Month {
-		return d1.Month < d2.Month
-	}
-	return d1.Day < d2.Day
-}
-
-// After reports whether d1 occurs after d2.
-func (d1 Date) After(d2 Date) bool {
-	return d2.Before(d1)
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The output is the result of d.String().
-func (d Date) MarshalText() ([]byte, error) {
-	return []byte(d.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The date is expected to be a string in a format accepted by ParseDate.
-func (d *Date) UnmarshalText(data []byte) error {
-	var err error
-	*d, err = ParseDate(string(data))
-	return err
-}
-
-// A Time represents a time with nanosecond precision.
-//
-// This type does not include location information, and therefore does not
-// describe a unique moment in time.
-//
-// This type exists to represent the TIME type in storage-based APIs like BigQuery.
-// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type.
-type Time struct {
-	Hour       int // The hour of the day in 24-hour format; range [0-23]
-	Minute     int // The minute of the hour; range [0-59]
-	Second     int // The second of the minute; range [0-59]
-	Nanosecond int // The nanosecond of the second; range [0-999999999]
-}
-
-// TimeOf returns the Time representing the time of day in which a time occurs
-// in that time's location. It ignores the date.
-func TimeOf(t time.Time) Time {
-	var tm Time
-	tm.Hour, tm.Minute, tm.Second = t.Clock()
-	tm.Nanosecond = t.Nanosecond()
-	return tm
-}
-
-// ParseTime parses a string and returns the time value it represents.
-// ParseTime accepts an extended form of the RFC3339 partial-time format. After
-// the HH:MM:SS part of the string, an optional fractional part may appear,
-// consisting of a decimal point followed by one to nine decimal digits.
-// (RFC3339 itself allows one or more digits after the decimal point).
-func ParseTime(s string) (Time, error) {
-	t, err := time.Parse("15:04:05.999999999", s)
-	if err != nil {
-		return Time{}, err
-	}
-	return TimeOf(t), nil
-}
-
-// String returns the time in the format described in ParseTime. If Nanosecond
-// is zero, no fractional part will be generated. Otherwise, the result will
-// end with a fractional part consisting of a decimal point and nine digits.
-func (t Time) String() string {
-	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
-	if t.Nanosecond == 0 {
-		return s
-	}
-	return s + fmt.Sprintf(".%09d", t.Nanosecond)
-}
-
-// IsValid reports whether the time is valid.
-func (t Time) IsValid() bool {
-	// Construct a non-zero time.
-	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
-	return TimeOf(tm) == t
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The output is the result of t.String().
-func (t Time) MarshalText() ([]byte, error) {
-	return []byte(t.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The time is expected to be a string in a format accepted by ParseTime.
-func (t *Time) UnmarshalText(data []byte) error {
-	var err error
-	*t, err = ParseTime(string(data))
-	return err
-}
-
-// A DateTime represents a date and time.
-//
-// This type does not include location information, and therefore does not
-// describe a unique moment in time.
-type DateTime struct {
-	Date Date
-	Time Time
-}
-
-// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and DaysSince.
-
-// DateTimeOf returns the DateTime in which a time occurs in that time's location.
-func DateTimeOf(t time.Time) DateTime {
-	return DateTime{
-		Date: DateOf(t),
-		Time: TimeOf(t),
-	}
-}
-
-// ParseDateTime parses a string and returns the DateTime it represents.
-// ParseDateTime accepts a variant of the RFC3339 date-time format that omits
-// the time offset but includes an optional fractional time, as described in
-// ParseTime. Informally, the accepted format is
-//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
-// where the 'T' may be a lower-case 't'.
-func ParseDateTime(s string) (DateTime, error) {
-	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
-	if err != nil {
-		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
-		if err != nil {
-			return DateTime{}, err
-		}
-	}
-	return DateTimeOf(t), nil
-}
-
-// String returns the datetime in the format described in ParseDateTime.
-func (dt DateTime) String() string {
-	return dt.Date.String() + "T" + dt.Time.String()
-}
-
-// IsValid reports whether the datetime is valid.
-func (dt DateTime) IsValid() bool {
-	return dt.Date.IsValid() && dt.Time.IsValid()
-}
-
-// In returns the time corresponding to the DateTime in the given location.
-//
-// If the time is missing or ambiguous at the location, In returns the same
-// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
-// both
-//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
-// and
-//     civil.DateTime{
-//         civil.Date{Year: 1955, Month: time.May, Day: 1}},
-//         civil.Time{Minute: 30}}.In(loc)
-// return 23:30:00 on April 30, 1955.
-//
-// In panics if loc is nil.
-func (dt DateTime) In(loc *time.Location) time.Time {
-	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
-}
-
-// Before reports whether dt1 occurs before dt2.
-func (dt1 DateTime) Before(dt2 DateTime) bool {
-	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
-}
-
-// After reports whether dt1 occurs after dt2.
-func (dt1 DateTime) After(dt2 DateTime) bool {
-	return dt2.Before(dt1)
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The output is the result of dt.String().
-func (dt DateTime) MarshalText() ([]byte, error) {
-	return []byte(dt.String()), nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// The datetime is expected to be a string in a format accepted by ParseDateTime.
-func (dt *DateTime) UnmarshalText(data []byte) error {
-	var err error
-	*dt, err = ParseDateTime(string(data))
-	return err
-}

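Since this commit drops the civil package from vendor, a quick usage sketch of the API defined above may help confirm nothing still depends on it; expected outputs are shown in comments:

package main

import (
	"fmt"
	"time"

	"cloud.google.com/go/civil"
)

func main() {
	d, err := civil.ParseDate("1955-05-01")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.AddDays(30)) // 1955-05-31

	jan1 := civil.Date{Year: 1955, Month: time.January, Day: 1}
	fmt.Println(d.DaysSince(jan1)) // 120

	dt, _ := civil.ParseDateTime("1955-05-01t00:30:00") // lower-case 't' is accepted
	fmt.Println(dt.In(time.UTC))                        // 1955-05-01 00:30:00 +0000 UTC
}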
+ 0 - 450
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go

@@ -1,450 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux,go1.7
-
-package main
-
-import (
-	"encoding/json"
-	"flag"
-	"fmt"
-	"io/ioutil"
-	"log"
-	"math/rand"
-	"os"
-	"sync"
-	"time"
-
-	"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints"
-	debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller"
-	"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector"
-	"cloud.google.com/go/compute/metadata"
-	"golang.org/x/debug"
-	"golang.org/x/debug/local"
-	"golang.org/x/net/context"
-	"golang.org/x/oauth2"
-	"golang.org/x/oauth2/google"
-	cd "google.golang.org/api/clouddebugger/v2"
-)
-
-var (
-	appModule         = flag.String("appmodule", "", "Optional application module name.")
-	appVersion        = flag.String("appversion", "", "Optional application module version name.")
-	sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.")
-	verbose           = flag.Bool("v", false, "Output verbose log messages.")
-	projectNumber     = flag.String("projectnumber", "", "Project number."+
-		"  If this is not set, it is read from the GCP metadata server.")
-	projectID = flag.String("projectid", "", "Project ID."+
-		"  If this is not set, it is read from the GCP metadata server.")
-	serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.")
-)
-
-const (
-	maxCapturedStackFrames = 50
-	maxCapturedVariables   = 1000
-)
-
-func main() {
-	flag.Usage = usage
-	flag.Parse()
-	args := flag.Args()
-	if len(args) == 0 {
-		// The user needs to supply the name of the executable to run.
-		flag.Usage()
-		return
-	}
-	if *projectNumber == "" {
-		var err error
-		*projectNumber, err = metadata.NumericProjectID()
-		if err != nil {
-			log.Print("Debuglet initialization: ", err)
-		}
-	}
-	if *projectID == "" {
-		var err error
-		*projectID, err = metadata.ProjectID()
-		if err != nil {
-			log.Print("Debuglet initialization: ", err)
-		}
-	}
-	sourceContexts, err := readSourceContextFile(*sourceContextFile)
-	if err != nil {
-		log.Print("Reading source context file: ", err)
-	}
-	var ts oauth2.TokenSource
-	ctx := context.Background()
-	if *serviceAccountFile != "" {
-		if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil {
-			log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err)
-		}
-	} else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil {
-		log.Print("Error getting application default credentials for Cloud Debugger:", err)
-		os.Exit(103)
-	}
-	c, err := debuglet.NewController(ctx, debuglet.Options{
-		ProjectNumber:  *projectNumber,
-		ProjectID:      *projectID,
-		AppModule:      *appModule,
-		AppVersion:     *appVersion,
-		SourceContexts: sourceContexts,
-		Verbose:        *verbose,
-		TokenSource:    ts,
-	})
-	if err != nil {
-		log.Fatal("Error connecting to Cloud Debugger: ", err)
-	}
-	prog, err := local.New(args[0])
-	if err != nil {
-		log.Fatal("Error loading program: ", err)
-	}
-	// Load the program, but don't actually start it running yet.
-	if _, err = prog.Run(args[1:]...); err != nil {
-		log.Fatal("Error loading program: ", err)
-	}
-	bs := breakpoints.NewBreakpointStore(prog)
-
-	// Seed the random number generator.
-	rand.Seed(time.Now().UnixNano())
-
-	// Now we want to do two things: run the user's program, and start sending
-	// List requests periodically to the Debuglet Controller to get breakpoints
-	// to set.
-	//
-	// We want to give the Debuglet Controller a chance to give us breakpoints
-	// before we start the program, otherwise we would miss any breakpoint
-	// triggers that occur during program startup -- for example, a breakpoint on
-	// the first line of main. But if the Debuglet Controller is not responding or
-	// is returning errors, we don't want to delay starting the program
-	// indefinitely.
-	//
-	// We pass a channel to breakpointListLoop, which will close it when the first
-	// List call finishes.  Then we wait until either the channel is closed or a
-	// 5-second timer has finished before starting the program.
-	ch := make(chan bool)
-	// Start a goroutine that sends List requests to the Debuglet Controller, and
-	// sets any breakpoints it gets back.
-	go breakpointListLoop(ctx, c, bs, ch)
-	// Wait until 5 seconds have passed or breakpointListLoop has closed ch.
-	select {
-	case <-time.After(5 * time.Second):
-	case <-ch:
-	}
-	// Run the debuggee.
-	programLoop(ctx, c, bs, prog)
-}
-
-// usage prints a usage message to stderr and exits.
-func usage() {
-	me := "a.out"
-	if len(os.Args) >= 1 {
-		me = os.Args[0]
-	}
-	fmt.Fprintf(os.Stderr, "Usage of %s:\n", me)
-	fmt.Fprintf(os.Stderr, "\t%s [flags...] -- <program name> args...\n", me)
-	fmt.Fprintf(os.Stderr, "Flags:\n")
-	flag.PrintDefaults()
-	fmt.Fprintf(os.Stderr,
-		"See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n")
-	os.Exit(2)
-}
-
-// readSourceContextFile reads a JSON-encoded source context from the given file.
-// It returns nil if filename is empty, and otherwise a non-empty slice on success.
-func readSourceContextFile(filename string) ([]*cd.SourceContext, error) {
-	if filename == "" {
-		return nil, nil
-	}
-	scJSON, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, fmt.Errorf("reading file %q: %v", filename, err)
-	}
-	var sc cd.SourceContext
-	if err = json.Unmarshal(scJSON, &sc); err != nil {
-		return nil, fmt.Errorf("parsing file %q: %v", filename, err)
-	}
-	return []*cd.SourceContext{&sc}, nil
-}
-
-// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and
-// passes the results to the BreakpointStore so it can set and unset breakpoints
-// in the program.
-//
-// After the first List call finishes, ch is closed.
-func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) {
-	const (
-		avgTimeBetweenCalls = time.Second
-		errorDelay          = 5 * time.Second
-	)
-
-	// randomDuration returns a random duration with expected value avg.
-	randomDuration := func(avg time.Duration) time.Duration {
-		return time.Duration(rand.Int63n(int64(2*avg + 1)))
-	}
-
-	var consecutiveFailures uint
-
-	for {
-		callStart := time.Now()
-		resp, err := c.List(ctx)
-		if err != nil && err != debuglet.ErrListUnchanged {
-			log.Printf("Debuglet controller server error: %v", err)
-		}
-		if err == nil {
-			bs.ProcessBreakpointList(resp.Breakpoints)
-		}
-
-		if first != nil {
-			// We've finished one call to List and set any breakpoints we received.
-			close(first)
-			first = nil
-		}
-
-		// Asynchronously send updates for any breakpoints that caused an error when
-		// the BreakpointStore tried to process them.  We don't wait for the update
-		// to finish before the program can exit, as we do for normal updates.
-		errorBps := bs.ErrorBreakpoints()
-		for _, bp := range errorBps {
-			go func(bp *cd.Breakpoint) {
-				if err := c.Update(ctx, bp.Id, bp); err != nil {
-					log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
-				}
-			}(bp)
-		}
-
-		// Make the next call not too soon after the one we just did.
-		delay := randomDuration(avgTimeBetweenCalls)
-
-		// If the call returned an error other than ErrListUnchanged, wait longer.
-		if err != nil && err != debuglet.ErrListUnchanged {
-			// Wait twice as long after each consecutive failure, to a maximum of 16x.
-			delay += randomDuration(errorDelay * (1 << consecutiveFailures))
-			if consecutiveFailures < 4 {
-				consecutiveFailures++
-			}
-		} else {
-			consecutiveFailures = 0
-		}
-
-		// Sleep until we reach time callStart+delay.  If we've already passed that
-		// time, time.Sleep will return immediately -- this should be the common
-		// case, since the server will delay responding to List for a while when
-		// there are no changes to report.
-		time.Sleep(callStart.Add(delay).Sub(time.Now()))
-	}
-}
-
-// programLoop runs the program being debugged to completion.  When a breakpoint's
-// conditions are satisfied, it sends an Update RPC to the Debuglet Controller.
-// The function returns when the program exits and all Update RPCs have finished.
-func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) {
-	var wg sync.WaitGroup
-	for {
-		// Run the program until it hits a breakpoint or exits.
-		status, err := prog.Resume()
-		if err != nil {
-			break
-		}
-
-		// Get the breakpoints at this address whose conditions were satisfied,
-		// and remove the ones that aren't logpoints.
-		bps := bs.BreakpointsAtPC(status.PC)
-		bps = bpsWithConditionSatisfied(bps, prog)
-		for _, bp := range bps {
-			if bp.Action != "LOG" {
-				bs.RemoveBreakpoint(bp)
-			}
-		}
-
-		if len(bps) == 0 {
-			continue
-		}
-
-		// Evaluate expressions and get the stack.
-		vc := valuecollector.NewCollector(prog, maxCapturedVariables)
-		needStackFrames := false
-		for _, bp := range bps {
-			// If evaluating bp's condition didn't return an error, evaluate bp's
-			// expressions, and later get the stack frames.
-			if bp.Status == nil {
-				bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc)
-				needStackFrames = true
-			}
-		}
-		var (
-			stack                    []*cd.StackFrame
-			stackFramesStatusMessage *cd.StatusMessage
-		)
-		if needStackFrames {
-			stack, stackFramesStatusMessage = stackFrames(prog, vc)
-		}
-
-		// Read variable values from the program.
-		variableTable := vc.ReadValues()
-
-		// Start a goroutine to send updates to the Debuglet Controller or write
-		// to logs, concurrently with resuming the program.
-		// TODO: retry Update on failure.
-		for _, bp := range bps {
-			wg.Add(1)
-			switch bp.Action {
-			case "LOG":
-				go func(format string, evaluatedExpressions []*cd.Variable) {
-					s := valuecollector.LogString(format, evaluatedExpressions, variableTable)
-					log.Print(s)
-					wg.Done()
-				}(bp.LogMessageFormat, bp.EvaluatedExpressions)
-				bp.Status = nil
-				bp.EvaluatedExpressions = nil
-			default:
-				go func(bp *cd.Breakpoint) {
-					defer wg.Done()
-					bp.IsFinalState = true
-					if bp.Status == nil {
-						// If evaluating bp's condition didn't return an error, include the
-						// stack frames, variable table, and any status message produced when
-						// getting the stack frames.
-						bp.StackFrames = stack
-						bp.VariableTable = variableTable
-						bp.Status = stackFramesStatusMessage
-					}
-					if err := c.Update(ctx, bp.Id, bp); err != nil {
-						log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
-					}
-				}(bp)
-			}
-		}
-	}
-
-	// Wait for all updates to finish before returning.
-	wg.Wait()
-}
-
-// bpsWithConditionSatisfied returns the breakpoints whose conditions are true
-// (or that do not have a condition).
-func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint {
-	var bpsOut []*cd.Breakpoint
-	for _, bp := range bpsIn {
-		cond, err := condTruth(bp.Condition, prog)
-		if err != nil {
-			bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition)
-			// Include bp in the list to be updated when there's an error, so that
-			// the user gets a response.
-			bpsOut = append(bpsOut, bp)
-		} else if cond {
-			bpsOut = append(bpsOut, bp)
-		}
-	}
-	return bpsOut
-}
-
-// condTruth evaluates a condition.
-func condTruth(condition string, prog debug.Program) (bool, error) {
-	if condition == "" {
-		// A condition wasn't set.
-		return true, nil
-	}
-	val, err := prog.Evaluate(condition)
-	if err != nil {
-		return false, err
-	}
-	if v, ok := val.(bool); !ok {
-		return false, fmt.Errorf("condition expression has type %T, should be bool", val)
-	} else {
-		return v, nil
-	}
-}
-
-// expressionValues evaluates a slice of expressions and returns a []*cd.Variable
-// containing the results.
-// If the result of an expression evaluation refers to values from the program's
-// memory (e.g., the expression evaluates to a slice) a corresponding variable is
-// added to the value collector, to be read later.
-func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable {
-	evaluatedExpressions := make([]*cd.Variable, len(expressions))
-	for i, exp := range expressions {
-		ee := &cd.Variable{Name: exp}
-		evaluatedExpressions[i] = ee
-		if val, err := prog.Evaluate(exp); err != nil {
-			ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression)
-		} else {
-			vc.FillValue(val, ee)
-		}
-	}
-	return evaluatedExpressions
-}
-
-// stackFrames returns a stack trace for the program.  It passes references to
-// function parameters and local variables to the value collector, so it can read
-// their values later.
-func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) {
-	frames, err := prog.Frames(maxCapturedStackFrames)
-	if err != nil {
-		return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified)
-	}
-	stackFrames := make([]*cd.StackFrame, len(frames))
-	for i, f := range frames {
-		frame := &cd.StackFrame{}
-		frame.Function = f.Function
-		for _, v := range f.Params {
-			frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v)))
-		}
-		for _, v := range f.Vars {
-			frame.Locals = append(frame.Locals, vc.AddVariable(v))
-		}
-		frame.Location = &cd.SourceLocation{
-			Path: f.File,
-			Line: int64(f.Line),
-		}
-		stackFrames[i] = frame
-	}
-	return stackFrames, nil
-}
-
-// errorStatusMessage returns a *cd.StatusMessage indicating an error,
-// with the given message and refersTo field.
-func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage {
-	return &cd.StatusMessage{
-		Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}},
-		IsError:     true,
-		RefersTo:    refersToString[refersTo],
-	}
-}
-
-const (
-	// RefersTo values for cd.StatusMessage.
-	refersToUnspecified = iota
-	refersToBreakpointCondition
-	refersToBreakpointExpression
-)
-
-// refersToString contains the strings for each refersTo value.
-// See the definition of StatusMessage in the v2/clouddebugger package.
-var refersToString = map[int]string{
-	refersToUnspecified:          "UNSPECIFIED",
-	refersToBreakpointCondition:  "BREAKPOINT_CONDITION",
-	refersToBreakpointExpression: "BREAKPOINT_EXPRESSION",
-}
-
-func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) {
-	data, err := ioutil.ReadFile(filename)
-	if err != nil {
-		return nil, fmt.Errorf("cannot read service account file: %v", err)
-	}
-	cfg, err := google.JWTConfigFromJSON(data, scope...)
-	if err != nil {
-		return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
-	}
-	return cfg.TokenSource(ctx), nil
-}

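The list loop above spaces its calls with jittered, exponentially backed-off delays: a uniform draw with expected value avgTimeBetweenCalls between calls, plus an error delay that doubles per consecutive failure and is capped at 16x. A standalone sketch of that shape (the helper name here is illustrative, not part of the original):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextDelay reproduces the backoff shape used above: a uniform draw on
// [0, 2*avg] (expected value avg) between calls, plus a jittered error delay
// that doubles per consecutive failure, capped at 2^4 = 16x.
func nextDelay(consecutiveFailures uint) time.Duration {
	const (
		avgTimeBetweenCalls = time.Second
		errorDelay          = 5 * time.Second
	)
	random := func(avg time.Duration) time.Duration {
		return time.Duration(rand.Int63n(int64(2*avg + 1)))
	}
	d := random(avgTimeBetweenCalls)
	if consecutiveFailures > 0 {
		n := consecutiveFailures
		if n > 4 {
			n = 4
		}
		d += random(errorDelay * (1 << n))
	}
	return d
}

func main() {
	for f := uint(0); f <= 5; f++ {
		fmt.Printf("failures=%d delay=%v\n", f, nextDelay(f))
	}
}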
+ 0 - 174
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go

@@ -1,174 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package breakpoints handles breakpoint requests we get from the user through
-// the Debuglet Controller, and manages corresponding breakpoints set in the code.
-package breakpoints
-
-import (
-	"log"
-	"sync"
-
-	"golang.org/x/debug"
-	cd "google.golang.org/api/clouddebugger/v2"
-)
-
-// BreakpointStore stores the set of breakpoints for a program.
-type BreakpointStore struct {
-	mu sync.Mutex
-	// prog is the program being debugged.
-	prog debug.Program
-	// idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint.  The
-	// map value is nil if the breakpoint is inactive.  A breakpoint is active if:
-	// - We received it from the Debuglet Controller, and it was active at the time;
-	// - We were able to set code breakpoints for it;
-	// - We have not reached any of those code breakpoints while satisfying the
-	//   breakpoint's conditions, or the breakpoint has action LOG; and
-	// - The Debuglet Controller hasn't informed us the breakpoint has become inactive.
-	idToBreakpoint map[string]*cd.Breakpoint
-	// pcToBps and bpToPCs store the many-to-many relationship between breakpoints we
-	// received from the Debuglet Controller and the code breakpoints we set for them.
-	pcToBps map[uint64][]*cd.Breakpoint
-	bpToPCs map[*cd.Breakpoint][]uint64
-	// errors contains any breakpoints which couldn't be set because they caused an
-	// error.  These are retrieved with ErrorBreakpoints, and the caller is
-	// expected to handle sending updates for them.
-	errors []*cd.Breakpoint
-}
-
-// NewBreakpointStore returns a BreakpointStore for the given program.
-func NewBreakpointStore(prog debug.Program) *BreakpointStore {
-	return &BreakpointStore{
-		idToBreakpoint: make(map[string]*cd.Breakpoint),
-		pcToBps:        make(map[uint64][]*cd.Breakpoint),
-		bpToPCs:        make(map[*cd.Breakpoint][]uint64),
-		prog:           prog,
-	}
-}
-
-// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call.
-func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) {
-	bs.mu.Lock()
-	defer bs.mu.Unlock()
-	for _, bp := range bps {
-		if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok {
-			if storedBp != nil && bp.IsFinalState {
-				// IsFinalState indicates that the breakpoint has been made inactive.
-				bs.removeBreakpointLocked(storedBp)
-			}
-		} else {
-			if bp.IsFinalState {
-				// The controller is notifying us that the breakpoint is no longer active,
-				// but we didn't know about it anyway.
-				continue
-			}
-			if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" {
-				bp.IsFinalState = true
-				bp.Status = &cd.StatusMessage{
-					Description: &cd.FormatMessage{Format: "Action is not supported"},
-					IsError:     true,
-				}
-				bs.errors = append(bs.errors, bp)
-				// Note in idToBreakpoint that we've already seen this breakpoint, so that we
-				// don't try to report it as an error multiple times.
-				bs.idToBreakpoint[bp.Id] = nil
-				continue
-			}
-			pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line))
-			if err != nil {
-				log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err)
-			}
-			if len(pcs) == 0 {
-				// We can't find a PC for this breakpoint's source line, so don't make it active.
-				// TODO: we could snap the line to a location where we can break, or report an error to the user.
-				bs.idToBreakpoint[bp.Id] = nil
-			} else {
-				bs.idToBreakpoint[bp.Id] = bp
-				for _, pc := range pcs {
-					bs.pcToBps[pc] = append(bs.pcToBps[pc], bp)
-				}
-				bs.bpToPCs[bp] = pcs
-			}
-		}
-	}
-}
-
-// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the
-// BreakpointStore tried to process them, and resets the list of such
-// breakpoints.
-// The caller is expected to send updates to the server to indicate the errors.
-func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint {
-	bs.mu.Lock()
-	defer bs.mu.Unlock()
-	bps := bs.errors
-	bs.errors = nil
-	return bps
-}
-
-// BreakpointsAtPC returns all the breakpoints for which we set a code
-// breakpoint at the given address.
-func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint {
-	bs.mu.Lock()
-	defer bs.mu.Unlock()
-	return bs.pcToBps[pc]
-}
-
-// RemoveBreakpoint makes the given breakpoint inactive.
-// This is called when either the debugged program hits the breakpoint, or the Debuglet
-// Controller informs us that the breakpoint is now inactive.
-func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) {
-	bs.mu.Lock()
-	bs.removeBreakpointLocked(bp)
-	bs.mu.Unlock()
-}
-
-func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) {
-	// Set the ID's corresponding breakpoint to nil, so that we won't activate it
-	// if we see it again.
-	// TODO: we could delete it after a few seconds.
-	bs.idToBreakpoint[bp.Id] = nil
-
-	// Delete bp from the list of cd breakpoints at each of its corresponding
-	// code breakpoint locations, and delete any code breakpoints which no longer
-	// have a corresponding cd breakpoint.
-	var codeBreakpointsToDelete []uint64
-	for _, pc := range bs.bpToPCs[bp] {
-		bps := remove(bs.pcToBps[pc], bp)
-		if len(bps) == 0 {
-			// bp was the last breakpoint set at this PC, so delete the code breakpoint.
-			codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc)
-			delete(bs.pcToBps, pc)
-		} else {
-			bs.pcToBps[pc] = bps
-		}
-	}
-	if len(codeBreakpointsToDelete) > 0 {
-		bs.prog.DeleteBreakpoints(codeBreakpointsToDelete)
-	}
-	delete(bs.bpToPCs, bp)
-}
-
-// remove updates rs by removing r, then returns rs.
-// The mutex of the BreakpointStore that contains rs must be held.
-func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint {
-	for i := range rs {
-		if rs[i] == r {
-			rs[i] = rs[len(rs)-1]
-			rs = rs[0 : len(rs)-1]
-			return rs
-		}
-	}
-	// We shouldn't reach here.
-	return rs
-}

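remove above uses the order-destroying swap-delete idiom: overwrite the found element with the last one and shrink the slice, making each removal O(1). A generic version of the same trick (Go 1.18+; illustrative, not part of the original):

package main

import "fmt"

// swapRemove deletes the first occurrence of v from s without preserving
// order — the same trick remove above plays on []*cd.Breakpoint.
func swapRemove[T comparable](s []T, v T) []T {
	for i := range s {
		if s[i] == v {
			s[i] = s[len(s)-1]
			return s[:len(s)-1]
		}
	}
	return s // not found; unchanged
}

func main() {
	fmt.Println(swapRemove([]int{1, 2, 3, 4}, 2)) // [1 4 3]
}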
+ 0 - 291
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go

@@ -1,291 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service.
-package controller
-
-import (
-	"crypto/sha256"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"log"
-	"sync"
-
-	"golang.org/x/net/context"
-	"golang.org/x/oauth2"
-	cd "google.golang.org/api/clouddebugger/v2"
-	"google.golang.org/api/googleapi"
-	"google.golang.org/api/option"
-	htransport "google.golang.org/api/transport/http"
-)
-
-const (
-	// agentVersionString identifies the agent to the service.
-	agentVersionString = "google.com/go-gcp/v0.2"
-	// initWaitToken is the wait token sent in the first Update request to a server.
-	initWaitToken = "init"
-)
-
-var (
-	// ErrListUnchanged is returned by List if the server time limit is reached
-	// before the list of breakpoints changes.
-	ErrListUnchanged = errors.New("breakpoint list unchanged")
-	// ErrDebuggeeDisabled is returned by List or Update if the server has disabled
-	// this Debuggee.  The caller can retry later.
-	ErrDebuggeeDisabled = errors.New("debuglet disabled by server")
-)
-
-// Controller manages a connection to the Debuglet Controller service.
-type Controller struct {
-	s serviceInterface
-	// waitToken is sent with List requests so the server knows which set of
-	// breakpoints this client has already seen. Each successful List request
-	// returns a new waitToken to send in the next request.
-	waitToken string
-	// verbose determines whether to do some logging
-	verbose bool
-	// options, uniquifier and description are used in register.
-	options     Options
-	uniquifier  string
-	description string
-	// labels are included when registering the debuggee. They should contain
-	// the module name, version and minorversion, and are used by the debug UI
-	// to label the correct version active for debugging.
-	labels map[string]string
-	// mu protects debuggeeID
-	mu sync.Mutex
-	// debuggeeID is returned from the server on registration, and is passed back
-	// to the server in List and Update requests.
-	debuggeeID string
-}
-
-// Options controls how the Debuglet Controller client identifies itself to the server.
-// See https://cloud.google.com/storage/docs/projects and
-// https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine
-// for further documentation of these parameters.
-type Options struct {
-	ProjectNumber  string              // GCP Project Number.
-	ProjectID      string              // GCP Project ID.
-	AppModule      string              // Module name for the debugged program.
-	AppVersion     string              // Version number for this module.
-	SourceContexts []*cd.SourceContext // Description of source.
-	Verbose        bool
-	TokenSource    oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger.
-}
-
-type serviceInterface interface {
-	Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error)
-	Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error)
-	List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error)
-}
-
-var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) {
-	httpClient, endpoint, err := htransport.NewClient(ctx, option.WithTokenSource(tokenSource))
-	if err != nil {
-		return nil, err
-	}
-	s, err := cd.New(httpClient)
-	if err != nil {
-		return nil, err
-	}
-	if endpoint != "" {
-		s.BasePath = endpoint
-	}
-	return &service{s: s}, nil
-}
-
-type service struct {
-	s *cd.Service
-}
-
-func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) {
-	call := cd.NewControllerDebuggeesService(s.s).Register(req)
-	return call.Context(ctx).Do()
-}
-
-func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) {
-	call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req)
-	return call.Context(ctx).Do()
-}
-
-func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) {
-	call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID)
-	call.WaitToken(waitToken)
-	return call.Context(ctx).Do()
-}
-
-// NewController connects to the Debuglet Controller server using the given options,
-// and returns a Controller for that connection.
-// Google Application Default Credentials are used to connect to the Debuglet Controller;
-// see https://developers.google.com/identity/protocols/application-default-credentials
-func NewController(ctx context.Context, o Options) (*Controller, error) {
-	// We build a JSON encoding of o.SourceContexts so we can hash it.
-	scJSON, err := json.Marshal(o.SourceContexts)
-	if err != nil {
-		scJSON = nil
-		o.SourceContexts = nil
-	}
-	const minorversion = "107157" // any arbitrary numeric string
-
-	// Compute a uniquifier string by hashing the project number, app module name,
-	// app module version, debuglet version, and source context.
-	// The choice of hash function is arbitrary.
-	h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s %d %s",
-		len(o.ProjectNumber), o.ProjectNumber,
-		len(o.AppModule), o.AppModule,
-		len(o.AppVersion), o.AppVersion,
-		len(agentVersionString), agentVersionString,
-		len(scJSON), scJSON,
-		len(minorversion), minorversion)))
-	uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters
-
-	description := o.ProjectID
-	if o.AppModule != "" {
-		description += "-" + o.AppModule
-	}
-	if o.AppVersion != "" {
-		description += "-" + o.AppVersion
-	}
-
-	s, err := newService(ctx, o.TokenSource)
-	if err != nil {
-		return nil, err
-	}
-
-	// Construct client.
-	c := &Controller{
-		s:           s,
-		waitToken:   initWaitToken,
-		verbose:     o.Verbose,
-		options:     o,
-		uniquifier:  uniquifier,
-		description: description,
-		labels: map[string]string{
-			"module":       o.AppModule,
-			"version":      o.AppVersion,
-			"minorversion": minorversion,
-		},
-	}
-
-	return c, nil
-}
-
-func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.debuggeeID != "" {
-		return c.debuggeeID, nil
-	}
-	// The debuglet hasn't been registered yet, or it is disabled and we should try registering again.
-	if err := c.register(ctx); err != nil {
-		return "", err
-	}
-	return c.debuggeeID, nil
-}
-
-// List retrieves the current list of breakpoints from the server.
-// If the set of breakpoints on the server is the same as the one returned in
-// the previous call to List, the server can delay responding until it changes,
-// and return an error instead if no change occurs before a time limit the
-// server sets.  List can't be called concurrently with itself.
-func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) {
-	id, err := c.getDebuggeeID(ctx)
-	if err != nil {
-		return nil, err
-	}
-	resp, err := c.s.List(ctx, id, c.waitToken)
-	if err != nil {
-		if isAbortedError(err) {
-			return nil, ErrListUnchanged
-		}
-		// For other errors, the protocol requires that we attempt to re-register.
-		c.mu.Lock()
-		defer c.mu.Unlock()
-		if regError := c.register(ctx); regError != nil {
-			return nil, regError
-		}
-		return nil, err
-	}
-	if resp == nil {
-		return nil, errors.New("no response")
-	}
-	if c.verbose {
-		log.Printf("List response: %v", resp)
-	}
-	c.waitToken = resp.NextWaitToken
-	return resp, nil
-}
-
-// isAbortedError reports whether err is a *googleapi.Error that contains
-// exactly one error in Errors, whose Reason is "aborted".
-func isAbortedError(err error) bool {
-	e, _ := err.(*googleapi.Error)
-	if e == nil {
-		return false
-	}
-	if len(e.Errors) != 1 {
-		return false
-	}
-	return e.Errors[0].Reason == "aborted"
-}
-
-// Update reports information to the server about a breakpoint that was hit.
-// Update can be called concurrently with List and Update.
-func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error {
-	req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp}
-	if c.verbose {
-		log.Printf("sending update for %s: %v", breakpointID, req)
-	}
-	id, err := c.getDebuggeeID(ctx)
-	if err != nil {
-		return err
-	}
-	_, err = c.s.Update(ctx, id, breakpointID, req)
-	return err
-}
-
-// register calls the Debuglet Controller Register method, and sets c.debuggeeID.
-// c.mu should be locked while calling this function.  List and Update can't
-// make progress until it returns.
-func (c *Controller) register(ctx context.Context) error {
-	req := cd.RegisterDebuggeeRequest{
-		Debuggee: &cd.Debuggee{
-			AgentVersion:   agentVersionString,
-			Description:    c.description,
-			Project:        c.options.ProjectNumber,
-			SourceContexts: c.options.SourceContexts,
-			Uniquifier:     c.uniquifier,
-			Labels:         c.labels,
-		},
-	}
-	resp, err := c.s.Register(ctx, &req)
-	if err != nil {
-		return err
-	}
-	if resp == nil {
-		return errors.New("register: no response")
-	}
-	if resp.Debuggee.IsDisabled {
-		// Setting c.debuggeeID to empty makes sure future List and Update calls
-		// will call register first.
-		c.debuggeeID = ""
-	} else {
-		c.debuggeeID = resp.Debuggee.Id
-	}
-	if c.debuggeeID == "" {
-		return ErrDebuggeeDisabled
-	}
-	return nil
-}

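NewController above length-prefixes every field before hashing, which makes the encoding injective: without the lengths, ("ab","c") and ("a","bc") would concatenate to identical bytes and collide. A minimal sketch of the technique (the helper name and exact separator are assumptions; it truncates to 16 bytes, i.e. 32 hex characters, as above):

package main

import (
	"crypto/sha256"
	"fmt"
)

// uniquifier hashes a length-prefixed encoding of its fields, mirroring the
// approach in NewController above. Length prefixes make the encoding
// injective; truncating the SHA-256 digest to 16 bytes yields 32 hex chars.
func uniquifier(fields ...string) string {
	h := sha256.New()
	for _, f := range fields {
		fmt.Fprintf(h, "%d %s ", len(f), f)
	}
	return fmt.Sprintf("%X", h.Sum(nil)[:16])
}

func main() {
	fmt.Println(uniquifier("ab", "c") == uniquifier("a", "bc")) // false
	fmt.Println(uniquifier("abc") == uniquifier("ab"+"c"))      // true: same fields
}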
+ 0 - 460
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go

@@ -1,460 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package valuecollector is used to collect the values of variables in a program.
-package valuecollector
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-	"strings"
-
-	"golang.org/x/debug"
-	cd "google.golang.org/api/clouddebugger/v2"
-)
-
-const (
-	maxArrayLength = 50
-	maxMapLength   = 20
-)
-
-// Collector is given references to variables from a program being debugged
-// using AddVariable. Then when ReadValues is called, the Collector will fetch
-// the values of those variables. Any variables referred to by those values
-// will also be fetched; e.g. the targets of pointers, members of structs,
-// elements of slices, etc. This continues iteratively, building a graph of
-// values, until all the reachable values are fetched, or a size limit is
-// reached.
-//
-// Variables are passed to the Collector as debug.Var, which is used by x/debug
-// to represent references to variables. Values are returned as cd.Variable,
-// which is used by the Debuglet Controller to represent the graph of values.
-//
-// For example, if the program has a struct variable:
-//
-//	foo := SomeStruct{a:42, b:"xyz"}
-//
-// and we call AddVariable with a reference to foo, we will get back a result
-// like:
-//
-//	cd.Variable{Name:"foo", VarTableIndex:10}
-//
-// which denotes a variable named "foo" which will have its value stored in
-// element 10 of the table that will later be returned by ReadValues. That
-// element might be:
-//
-//	out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}}
-//
-// which denotes a struct with two members a and b, whose values are in elements
-// 11 and 12 of the output table:
-//
-//	out[11] = &cd.Variable{Value:"42"}
-//	out[12] = &cd.Variable{Value:"xyz"}
-type Collector struct {
-	// prog is the program being debugged.
-	prog debug.Program
-	// limit is the maximum size of the output slice of values.
-	limit int
-	// index is a map from references (variables and map elements) to their
-	// locations in the table.
-	index map[reference]int
-	// table contains the references, including those given to the
-	// Collector directly and those the Collector itself found.
-	// If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry
-	// of table can't be used. On initialization we put a dummy value there.
-	table []reference
-}
-
-// reference represents a value which is in the queue to be read by the
-// collector.  It is either a debug.Var, or a mapElement.
-type reference interface{}
-
-// mapElement represents an element of a map in the debugged program's memory.
-type mapElement struct {
-	debug.Map
-	index uint64
-}
-
-// NewCollector returns a Collector for the given program and size limit.
-// The limit is the maximum size of the slice of values returned by ReadValues.
-func NewCollector(prog debug.Program, limit int) *Collector {
-	return &Collector{
-		prog:  prog,
-		limit: limit,
-		index: make(map[reference]int),
-		table: []reference{debug.Var{}},
-	}
-}
-
-// AddVariable adds another variable to be collected.
-// The Collector doesn't get the value immediately; it returns a cd.Variable
-// that contains an index into the table which will later be returned by
-// ReadValues.
-func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable {
-	ret := &cd.Variable{Name: lv.Name}
-	if index, ok := c.add(lv.Var); !ok {
-		// If the add call failed, it's because we reached the size limit.
-		// The Debuglet Controller's convention is to pass it a "Not Captured" error
-		// in this case.
-		ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-	} else {
-		ret.VarTableIndex = int64(index)
-	}
-	return ret
-}
-
-// add adds a reference to the set of values to be read from the
-// program. It returns the index in the output table that will contain the
-// corresponding value. It fails if the table has reached the size limit.
-// It deduplicates references, so the index may be the same as one that was
-// returned from an earlier add call.
-func (c *Collector) add(r reference) (outputIndex int, ok bool) {
-	if i, ok := c.index[r]; ok {
-		return i, true
-	}
-	i := len(c.table)
-	if i >= c.limit {
-		return 0, false
-	}
-	c.index[r] = i
-	c.table = append(c.table, r)
-	return i, true
-}
-
-func addMember(v *cd.Variable, name string) *cd.Variable {
-	v2 := &cd.Variable{Name: name}
-	v.Members = append(v.Members, v2)
-	return v2
-}
-
-// ReadValues fetches values of the variables that were passed to the Collector
-// with AddVariable. The values of any new variables found are also fetched,
-// e.g. the targets of pointers or the members of structs, until we reach the
-// size limit or we run out of values to fetch.
-// The results are output as a []*cd.Variable, which is the type we need to send
-// to the Debuglet Controller after we trigger a breakpoint.
-func (c *Collector) ReadValues() (out []*cd.Variable) {
-	for i := 0; i < len(c.table); i++ {
-		// Create a new cd.Variable for this value, and append it to the output.
-		dcv := new(cd.Variable)
-		out = append(out, dcv)
-		if i == 0 {
-			// The first element is unused.
-			continue
-		}
-		switch x := c.table[i].(type) {
-		case mapElement:
-			key, value, err := c.prog.MapElement(x.Map, x.index)
-			if err != nil {
-				dcv.Status = statusMessage(err.Error(), true, refersToVariableValue)
-				continue
-			}
-			// Add a member for the key.
-			member := addMember(dcv, "key")
-			if index, ok := c.add(key); !ok {
-				// The table is full.
-				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-				continue
-			} else {
-				member.VarTableIndex = int64(index)
-			}
-			// Add a member for the value.
-			member = addMember(dcv, "value")
-			if index, ok := c.add(value); !ok {
-				// The table is full.
-				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-			} else {
-				member.VarTableIndex = int64(index)
-			}
-		case debug.Var:
-			if v, err := c.prog.Value(x); err != nil {
-				dcv.Status = statusMessage(err.Error(), true, refersToVariableValue)
-			} else {
-				c.FillValue(v, dcv)
-			}
-		}
-	}
-	return out
-}
-
-// indexable is an interface for arrays, slices and channels.
-type indexable interface {
-	Len() uint64
-	Element(uint64) debug.Var
-}
-
-// channel implements indexable.
-type channel struct {
-	debug.Channel
-}
-
-func (c channel) Len() uint64 {
-	return c.Length
-}
-
-var (
-	_ indexable = debug.Array{}
-	_ indexable = debug.Slice{}
-	_ indexable = channel{}
-)
-
-// FillValue copies a value into a cd.Variable.  Any variables referred to by
-// that value, e.g. struct members and pointer targets, are added to the
-// collector's queue, to be fetched later by ReadValues.
-func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) {
-	if c, ok := v.(debug.Channel); ok {
-		// Convert to channel, which implements indexable.
-		v = channel{c}
-	}
-	// Fill in dcv in a manner depending on the type of the value we got.
-	switch val := v.(type) {
-	case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128:
-		// For simple types, we just print the value to dcv.Value.
-		dcv.Value = fmt.Sprint(val)
-	case string:
-		// Put double quotes around strings.
-		dcv.Value = strconv.Quote(val)
-	case debug.String:
-		if uint64(len(val.String)) < val.Length {
-			// This string value was truncated.
-			dcv.Value = strconv.Quote(val.String + "...")
-		} else {
-			dcv.Value = strconv.Quote(val.String)
-		}
-	case debug.Struct:
-		// For structs, we add an entry to dcv.Members for each field in the
-		// struct.
-		// Each member will contain the name of the field, and the index in the
-		// output table which will contain the value of that field.
-		for _, f := range val.Fields {
-			member := addMember(dcv, f.Name)
-			if index, ok := c.add(f.Var); !ok {
-				// The table is full.
-				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-			} else {
-				member.VarTableIndex = int64(index)
-			}
-		}
-	case debug.Map:
-		dcv.Value = fmt.Sprintf("len = %d", val.Length)
-		for i := uint64(0); i < val.Length; i++ {
-			field := addMember(dcv, `⚫`)
-			if i == maxMapLength {
-				field.Name = "..."
-				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
-				break
-			}
-			if index, ok := c.add(mapElement{val, i}); !ok {
-				// The value table is full; add a member to contain the error message.
-				field.Name = "..."
-				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-				break
-			} else {
-				field.VarTableIndex = int64(index)
-			}
-		}
-	case debug.Pointer:
-		if val.Address == 0 {
-			dcv.Value = "<nil>"
-		} else if val.TypeID == 0 {
-			// We don't know the type of the pointer, so just output the address as
-			// the value.
-			dcv.Value = fmt.Sprintf("0x%X", val.Address)
-			dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName)
-		} else {
-			// Adds the pointed-to variable to the table, and links this value to
-			// that table entry through VarTableIndex.
-			dcv.Value = fmt.Sprintf("0x%X", val.Address)
-			target := addMember(dcv, "")
-			if index, ok := c.add(debug.Var(val)); !ok {
-				target.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-			} else {
-				target.VarTableIndex = int64(index)
-			}
-		}
-	case indexable:
-		// Arrays, slices and channels.
-		dcv.Value = "len = " + fmt.Sprint(val.Len())
-		for j := uint64(0); j < val.Len(); j++ {
-			field := addMember(dcv, fmt.Sprint(`[`, j, `]`))
-			if j == maxArrayLength {
-				field.Name = "..."
-				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
-				break
-			}
-			vr := val.Element(j)
-			if index, ok := c.add(vr); !ok {
-				// The value table is full; add a member to contain the error message.
-				field.Name = "..."
-				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
-				break
-			} else {
-				// Add a member with the index as the name.
-				field.VarTableIndex = int64(index)
-			}
-		}
-	default:
-		dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName)
-	}
-}
-
-// statusMessage returns a *cd.StatusMessage with the given message, IsError
-// field and refersTo field.
-func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage {
-	return &cd.StatusMessage{
-		Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}},
-		IsError:     isError,
-		RefersTo:    refersToString[refersTo],
-	}
-}
-
-// LogString produces a string for a logpoint, substituting in variable values
-// using evaluatedExpressions and varTable.
-func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "LOGPOINT: ")
-	seen := make(map[*cd.Variable]bool)
-	for i := 0; i < len(s); {
-		if s[i] == '$' {
-			i++
-			if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok {
-				// This token is one of $0, $1, etc.  Write the corresponding expression.
-				writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen)
-				i += n
-			} else {
-				// Something else, like $$.
-				buf.WriteByte(s[i])
-				i++
-			}
-		} else {
-			buf.WriteByte(s[i])
-			i++
-		}
-	}
-	return buf.String()
-}
-
-func parseToken(s string, max int) (num int, bytesRead int, ok bool) {
-	var i int
-	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
-		i++
-	}
-	num, err := strconv.Atoi(s[:i])
-	return num, i, err == nil && num <= max
-}
-
-// writeExpression recursively writes variables to buf, in a format suitable
-// for logging.  If printName is true, writes the name of the variable.
-func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) {
-	if v == nil {
-		// Shouldn't happen.
-		return
-	}
-	name, value, status, members := v.Name, v.Value, v.Status, v.Members
-
-	// If v.VarTableIndex is not zero, it refers to an element of varTable.
-	// We merge its fields with the fields we got from v.
-	var other *cd.Variable
-	if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) {
-		other = varTable[idx]
-	}
-	if other != nil {
-		if name == "" {
-			name = other.Name
-		}
-		if value == "" {
-			value = other.Value
-		}
-		if status == nil {
-			status = other.Status
-		}
-		if len(members) == 0 {
-			members = other.Members
-		}
-	}
-	if printName && name != "" {
-		buf.WriteString(name)
-		buf.WriteByte(':')
-	}
-
-	// If we have seen this value before, write "..." rather than repeating it.
-	if seen[v] {
-		buf.WriteString("...")
-		return
-	}
-	seen[v] = true
-	if other != nil {
-		if seen[other] {
-			buf.WriteString("...")
-			return
-		}
-		seen[other] = true
-	}
-
-	if value != "" && !strings.HasPrefix(value, "len = ") {
-		// A plain value.
-		buf.WriteString(value)
-	} else if status != nil && status.Description != nil {
-		// An error.
-		for _, p := range status.Description.Parameters {
-			buf.WriteByte('(')
-			buf.WriteString(p)
-			buf.WriteByte(')')
-		}
-	} else if name == `⚫` {
-		// A map element.
-		first := true
-		for _, member := range members {
-			if first {
-				first = false
-			} else {
-				buf.WriteByte(':')
-			}
-			writeExpression(buf, member, false, varTable, seen)
-		}
-	} else {
-		// A map, array, slice, channel, or struct.
-		isStruct := value == ""
-		first := true
-		buf.WriteByte('{')
-		for _, member := range members {
-			if first {
-				first = false
-			} else {
-				buf.WriteString(", ")
-			}
-			writeExpression(buf, member, isStruct, varTable, seen)
-		}
-		buf.WriteByte('}')
-	}
-}
-
-const (
-	// Error messages for cd.StatusMessage
-	messageNotCaptured        = "Not captured"
-	messageTruncated          = "Truncated"
-	messageUnknownPointerType = "Unknown pointer type"
-	messageUnknownType        = "Unknown type"
-	// RefersTo values for cd.StatusMessage.
-	refersToVariableName = iota
-	refersToVariableValue
-)
-
-// refersToString contains the strings for each refersTo value.
-// See the definition of StatusMessage in the v2/clouddebugger package.
-var refersToString = map[int]string{
-	refersToVariableName:  "VARIABLE_NAME",
-	refersToVariableValue: "VARIABLE_VALUE",
-}
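
For context on the logpoint formatting removed above: LogString scans the format
string and substitutes $0, $1, ... with evaluated expressions, while "$$" escapes
a literal dollar sign. Below is a minimal, self-contained sketch of that token
substitution; it is a hypothetical simplification that drops the cd.Variable
table merging and cycle detection the real writeExpression performs.

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// logString is a stripped-down analogue of the removed LogString: tokens
// $0, $1, ... are replaced by the corresponding value; anything else is
// copied through verbatim.
func logString(s string, values []string) string {
	var buf bytes.Buffer
	buf.WriteString("LOGPOINT: ")
	for i := 0; i < len(s); {
		if s[i] == '$' {
			i++
			if num, n, ok := parseToken(s[i:], len(values)-1); ok {
				buf.WriteString(values[num]) // a token like $0: substitute the value
				i += n
			} else if i < len(s) {
				buf.WriteByte(s[i]) // e.g. "$$" yields a literal '$'
				i++
			}
		} else {
			buf.WriteByte(s[i])
			i++
		}
	}
	return buf.String()
}

// parseToken reads a leading decimal number, exactly as in the removed code.
func parseToken(s string, max int) (num, bytesRead int, ok bool) {
	var i int
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		i++
	}
	num, err := strconv.Atoi(s[:i])
	return num, i, err == nil && num <= max
}

func main() {
	fmt.Println(logString("x=$0, y=$1, cost: $$5", []string{"3", "7"}))
	// Prints: LOGPOINT: x=3, y=7, cost: $5
}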

+ 0 - 674
vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go

@@ -1,674 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package container
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	containerpb "google.golang.org/genproto/googleapis/container/v1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient.
-type ClusterManagerCallOptions struct {
-	ListClusters            []gax.CallOption
-	GetCluster              []gax.CallOption
-	CreateCluster           []gax.CallOption
-	UpdateCluster           []gax.CallOption
-	UpdateNodePool          []gax.CallOption
-	SetNodePoolAutoscaling  []gax.CallOption
-	SetLoggingService       []gax.CallOption
-	SetMonitoringService    []gax.CallOption
-	SetAddonsConfig         []gax.CallOption
-	SetLocations            []gax.CallOption
-	UpdateMaster            []gax.CallOption
-	SetMasterAuth           []gax.CallOption
-	DeleteCluster           []gax.CallOption
-	ListOperations          []gax.CallOption
-	GetOperation            []gax.CallOption
-	CancelOperation         []gax.CallOption
-	GetServerConfig         []gax.CallOption
-	ListNodePools           []gax.CallOption
-	GetNodePool             []gax.CallOption
-	CreateNodePool          []gax.CallOption
-	DeleteNodePool          []gax.CallOption
-	RollbackNodePoolUpgrade []gax.CallOption
-	SetNodePoolManagement   []gax.CallOption
-	SetLabels               []gax.CallOption
-	SetLegacyAbac           []gax.CallOption
-	StartIPRotation         []gax.CallOption
-	CompleteIPRotation      []gax.CallOption
-	SetNodePoolSize         []gax.CallOption
-	SetNetworkPolicy        []gax.CallOption
-	SetMaintenancePolicy    []gax.CallOption
-}
-
-func defaultClusterManagerClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("container.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultClusterManagerCallOptions() *ClusterManagerCallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &ClusterManagerCallOptions{
-		ListClusters:            retry[[2]string{"default", "idempotent"}],
-		GetCluster:              retry[[2]string{"default", "idempotent"}],
-		CreateCluster:           retry[[2]string{"default", "non_idempotent"}],
-		UpdateCluster:           retry[[2]string{"default", "non_idempotent"}],
-		UpdateNodePool:          retry[[2]string{"default", "non_idempotent"}],
-		SetNodePoolAutoscaling:  retry[[2]string{"default", "non_idempotent"}],
-		SetLoggingService:       retry[[2]string{"default", "non_idempotent"}],
-		SetMonitoringService:    retry[[2]string{"default", "non_idempotent"}],
-		SetAddonsConfig:         retry[[2]string{"default", "non_idempotent"}],
-		SetLocations:            retry[[2]string{"default", "non_idempotent"}],
-		UpdateMaster:            retry[[2]string{"default", "non_idempotent"}],
-		SetMasterAuth:           retry[[2]string{"default", "non_idempotent"}],
-		DeleteCluster:           retry[[2]string{"default", "idempotent"}],
-		ListOperations:          retry[[2]string{"default", "idempotent"}],
-		GetOperation:            retry[[2]string{"default", "idempotent"}],
-		CancelOperation:         retry[[2]string{"default", "non_idempotent"}],
-		GetServerConfig:         retry[[2]string{"default", "idempotent"}],
-		ListNodePools:           retry[[2]string{"default", "idempotent"}],
-		GetNodePool:             retry[[2]string{"default", "idempotent"}],
-		CreateNodePool:          retry[[2]string{"default", "non_idempotent"}],
-		DeleteNodePool:          retry[[2]string{"default", "idempotent"}],
-		RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}],
-		SetNodePoolManagement:   retry[[2]string{"default", "non_idempotent"}],
-		SetLabels:               retry[[2]string{"default", "non_idempotent"}],
-		SetLegacyAbac:           retry[[2]string{"default", "non_idempotent"}],
-		StartIPRotation:         retry[[2]string{"default", "non_idempotent"}],
-		CompleteIPRotation:      retry[[2]string{"default", "non_idempotent"}],
-		SetNodePoolSize:         retry[[2]string{"default", "non_idempotent"}],
-		SetNetworkPolicy:        retry[[2]string{"default", "non_idempotent"}],
-		SetMaintenancePolicy:    retry[[2]string{"default", "non_idempotent"}],
-	}
-}
-
-// ClusterManagerClient is a client for interacting with the Google Container Engine API.
-type ClusterManagerClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	clusterManagerClient containerpb.ClusterManagerClient
-
-	// The call options for this service.
-	CallOptions *ClusterManagerCallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClusterManagerClient creates a new cluster manager client.
-//
-// Google Container Engine Cluster Manager v1
-func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &ClusterManagerClient{
-		conn:        conn,
-		CallOptions: defaultClusterManagerCallOptions(),
-
-		clusterManagerClient: containerpb.NewClusterManagerClient(conn),
-	}
-	c.setGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *ClusterManagerClient) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *ClusterManagerClient) Close() error {
-	return c.conn.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ListClusters lists all clusters owned by a project in either the specified zone or all
-// zones.
-func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...)
-	var resp *containerpb.ListClustersResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetCluster gets the details of a specific cluster.
-func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...)
-	var resp *containerpb.Cluster
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// CreateCluster creates a cluster, consisting of the specified number and type of Google
-// Compute Engine instances.
-//
-// By default, the cluster is created in the project's
-// default network (at /compute/docs/networks-and-firewalls#networks).
-//
-// One firewall is added for the cluster. After cluster creation,
-// the cluster creates routes for each node to allow the containers
-// on that node to communicate with all other instances in the
-// cluster.
-//
-// Finally, an entry is added to the project's global metadata indicating
-// which CIDR range is being used by the cluster.
-func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateCluster updates the settings of a specific cluster.
-func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateNodePool updates the version and/or image type of a specific node pool.
-func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool.
-func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetLoggingService sets the logging service of a specific cluster.
-func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetMonitoringService sets the monitoring service of a specific cluster.
-func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetAddonsConfig sets the addons of a specific cluster.
-func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetLocations sets the locations of a specific cluster.
-func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateMaster updates the master of a specific cluster.
-func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetMasterAuth sets master auth materials. Currently it supports changing
-// the admin password of a specific cluster, either by generating a new
-// password or by setting it explicitly.
-func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
-// nodes.
-//
-// Firewalls and routes that were configured during cluster creation
-// are also deleted.
-//
-// Other Google Compute Engine resources that might be in use by the cluster
-// (e.g. load balancer resources) will not be deleted if they weren't present
-// at the initial create time.
-func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListOperations lists all operations in a project in a specific zone or all zones.
-func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
-	var resp *containerpb.ListOperationsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetOperation gets the specified operation.
-func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// CancelOperation cancels the specified operation.
-func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// GetServerConfig returns configuration info about the Container Engine service.
-func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...)
-	var resp *containerpb.ServerConfig
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListNodePools lists the node pools for a cluster.
-func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...)
-	var resp *containerpb.ListNodePoolsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetNodePool retrieves the node pool requested.
-func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...)
-	var resp *containerpb.NodePool
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// CreateNodePool creates a node pool for a cluster.
-func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteNodePool deletes a node pool from a cluster.
-func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// RollbackNodePoolUpgrade rolls back a previously Aborted or Failed NodePool
-// upgrade. This is a no-op if the last upgrade completed successfully.
-func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetNodePoolManagement sets the NodeManagement options for a node pool.
-func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetLabels sets labels on a cluster.
-func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
-func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// StartIPRotation starts master IP rotation.
-func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// CompleteIPRotation completes master IP rotation.
-func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetNodePoolSize sets the size of a specific node pool.
-func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetNetworkPolicy enables or disables Network Policy for a cluster.
-func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// SetMaintenancePolicy sets the maintenance policy for a cluster.
-func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
-	var resp *containerpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
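
As a usage reference for the generated client removed above, here is a minimal
sketch, assuming the pre-removal import paths (cloud.google.com/go/container/apiv1
and the google.golang.org/genproto container v1 types) and a placeholder project
ID. ListClusters is tagged idempotent, so per defaultClusterManagerCallOptions
above it would be retried on DeadlineExceeded and Unavailable with the
100 ms / 1.3x / 60 s backoff.

package main

import (
	"fmt"
	"log"

	container "cloud.google.com/go/container/apiv1"
	"golang.org/x/net/context"
	containerpb "google.golang.org/genproto/googleapis/container/v1"
)

func main() {
	ctx := context.Background()

	// Dials container.googleapis.com:443 with the default scopes.
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	resp, err := c.ListClusters(ctx, &containerpb.ListClustersRequest{
		ProjectId: "my-project", // placeholder project ID
		Zone:      "-",          // "-" asks for clusters in all zones
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, cl := range resp.Clusters {
		fmt.Println(cl.Name, cl.Status)
	}
}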

+ 0 - 48
vendor/cloud.google.com/go/container/apiv1/doc.go

@@ -1,48 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package container is an auto-generated package for the
-// Google Container Engine API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// The Google Kubernetes Engine API is used for building and managing
-// container-based applications, powered by the open source Kubernetes
-// technology.
-package container // import "cloud.google.com/go/container/apiv1"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-	}
-}
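
The insertMetadata helper above merges, rather than replaces, whatever metadata
is already attached to the outgoing context. A small self-contained sketch
demonstrating that behavior (the helper body is copied from the removed doc.go;
the keys and values are illustrative):

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// insertMetadata appends the given metadata to anything already attached to
// the outgoing context, mirroring the helper in the removed doc.go.
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy() // Copy returns a fresh, non-nil MD even for a nil receiver
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

func main() {
	ctx := metadata.AppendToOutgoingContext(context.Background(), "k", "v1")
	ctx = insertMetadata(ctx, metadata.Pairs("k", "v2", "x-goog-api-client", "gl-go/1.x"))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["k"])                 // [v1 v2]: values are merged, not replaced
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.x]
}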

+ 0 - 272
vendor/cloud.google.com/go/container/container.go

@@ -1,272 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package container contains a deprecated Google Container Engine client.
-//
-// Deprecated: Use google.golang.org/api/container instead.
-package container // import "cloud.google.com/go/container"
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	"golang.org/x/net/context"
-	raw "google.golang.org/api/container/v1"
-	"google.golang.org/api/option"
-	htransport "google.golang.org/api/transport/http"
-)
-
-type Type string
-
-const (
-	TypeCreate = Type("createCluster")
-	TypeDelete = Type("deleteCluster")
-)
-
-type Status string
-
-const (
-	StatusDone         = Status("done")
-	StatusPending      = Status("pending")
-	StatusRunning      = Status("running")
-	StatusError        = Status("error")
-	StatusProvisioning = Status("provisioning")
-	StatusStopping     = Status("stopping")
-)
-
-const prodAddr = "https://container.googleapis.com/"
-const userAgent = "gcloud-golang-container/20151008"
-
-// Client is a Google Container Engine client, which may be used to manage
-// clusters within a project. It must be constructed via NewClient.
-type Client struct {
-	projectID string
-	svc       *raw.Service
-}
-
-// NewClient creates a new Google Container Engine client.
-func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
-	o := []option.ClientOption{
-		option.WithEndpoint(prodAddr),
-		option.WithScopes(raw.CloudPlatformScope),
-		option.WithUserAgent(userAgent),
-	}
-	o = append(o, opts...)
-	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-
-	svc, err := raw.New(httpClient)
-	if err != nil {
-		return nil, fmt.Errorf("constructing container client: %v", err)
-	}
-	svc.BasePath = endpoint
-
-	c := &Client{
-		projectID: projectID,
-		svc:       svc,
-	}
-
-	return c, nil
-}
-
-// Resource is a Google Container Engine cluster resource.
-type Resource struct {
-	// Name is the name of this cluster. The name must be unique
-	// within this project and zone, and can be up to 40 characters.
-	Name string
-
-	// Description is the description of the cluster. Optional.
-	Description string
-
-	// Zone is the Google Compute Engine zone in which the cluster resides.
-	Zone string
-
-	// Status is the current status of the cluster. It could either be
-	// StatusError, StatusProvisioning, StatusRunning or StatusStopping.
-	Status Status
-
-	// Num is the number of nodes in this cluster resource.
-	Num int64
-
-	// APIVersion is the version of the Kubernetes master and kubelets running
-	// in this cluster. Allowed value is 0.4.2, or leave blank to
-	// pick up the latest stable release.
-	APIVersion string
-
-	// Endpoint is the IP address of this cluster's Kubernetes master.
-	// The endpoint can be accessed at https://username:password@endpoint/.
-	// See Username and Password fields for the username and password information.
-	Endpoint string
-
-	// Username is the username to use when accessing the Kubernetes master endpoint.
-	Username string
-
-	// Password is the password to use when accessing the Kubernetes master endpoint.
-	Password string
-
-	// ContainerIPv4CIDR is the IP addresses of the container pods in
-	// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
-	ContainerIPv4CIDR string
-
-	// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this
-	// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
-	// always in the 10.0.0.0/16 range.
-	ServicesIPv4CIDR string
-
-	// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
-	// If none set, the default type is used while creating a new cluster.
-	MachineType string
-
-	// This field is ignored. It was removed from the underlying container API in v1.
-	SourceImage string
-
-	// Created is the creation time of this cluster.
-	Created time.Time
-}
-
-func resourceFromRaw(c *raw.Cluster) *Resource {
-	if c == nil {
-		return nil
-	}
-	r := &Resource{
-		Name:              c.Name,
-		Description:       c.Description,
-		Zone:              c.Zone,
-		Status:            Status(c.Status),
-		Num:               c.CurrentNodeCount,
-		APIVersion:        c.InitialClusterVersion,
-		Endpoint:          c.Endpoint,
-		Username:          c.MasterAuth.Username,
-		Password:          c.MasterAuth.Password,
-		ContainerIPv4CIDR: c.ClusterIpv4Cidr,
-		ServicesIPv4CIDR:  c.ServicesIpv4Cidr,
-		MachineType:       c.NodeConfig.MachineType,
-	}
-	r.Created, _ = time.Parse(time.RFC3339, c.CreateTime)
-	return r
-}
-
-func resourcesFromRaw(c []*raw.Cluster) []*Resource {
-	r := make([]*Resource, len(c))
-	for i, val := range c {
-		r[i] = resourceFromRaw(val)
-	}
-	return r
-}
-
-// Op represents a Google Container Engine API operation.
-type Op struct {
-	// Name is the name of the operation.
-	Name string
-
-	// Zone is the Google Compute Engine zone.
-	Zone string
-
-	// This field is ignored. It was removed from the underlying container API in v1.
-	TargetURL string
-
-	// Type is the operation type. It could be either be TypeCreate or TypeDelete.
-	Type Type
-
-	// Status is the current status of this operation. It could be either
-	// OpDone or OpPending.
-	Status Status
-}
-
-func opFromRaw(o *raw.Operation) *Op {
-	if o == nil {
-		return nil
-	}
-	return &Op{
-		Name:   o.Name,
-		Zone:   o.Zone,
-		Type:   Type(o.OperationType),
-		Status: Status(o.Status),
-	}
-}
-
-func opsFromRaw(o []*raw.Operation) []*Op {
-	ops := make([]*Op, len(o))
-	for i, val := range o {
-		ops[i] = opFromRaw(val)
-	}
-	return ops
-}
-
-// Clusters returns a list of cluster resources from the specified zone.
-// If no zone is specified, it returns all clusters under the user's project.
-func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) {
-	if zone == "" {
-		zone = "-"
-	}
-	resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do()
-	if err != nil {
-		return nil, err
-	}
-	return resourcesFromRaw(resp.Clusters), nil
-}
-
-// Cluster returns metadata about the specified cluster.
-func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) {
-	resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do()
-	if err != nil {
-		return nil, err
-	}
-	return resourceFromRaw(resp), nil
-}
-
-// CreateCluster creates a new cluster with the provided metadata
-// in the specified zone.
-func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
-	panic("not implemented")
-}
-
-// DeleteCluster deletes a cluster.
-func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error {
-	_, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do()
-	return err
-}
-
-// Operations returns a list of operations from the specified zone.
-// If no zone is specified, it lists all operations running under the
-// user's project.
-func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) {
-	if zone == "" {
-		resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do()
-		if err != nil {
-			return nil, err
-		}
-		return opsFromRaw(resp.Operations), nil
-	}
-	resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do()
-	if err != nil {
-		return nil, err
-	}
-	return opsFromRaw(resp.Operations), nil
-}
-
-// Operation returns an operation.
-func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) {
-	resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do()
-	if err != nil {
-		return nil, err
-	}
-	if resp.StatusMessage != "" {
-		return nil, errors.New(resp.StatusMessage)
-	}
-	return opFromRaw(resp), nil
-}
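
For reference, a minimal sketch of how this deprecated client was typically
used, assuming the pre-removal import path cloud.google.com/go/container and a
placeholder project ID; per the deprecation note above, new code should use
google.golang.org/api/container instead.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/container"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()

	// NewClient wraps the raw google.golang.org/api/container/v1 service
	// behind the simplified Resource/Op types shown in the removed file.
	c, err := container.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}

	// An empty zone is translated to "-", i.e. clusters in all zones.
	clusters, err := c.Clusters(ctx, "")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range clusters {
		fmt.Printf("%s (%s): %d nodes, status %s\n", r.Name, r.Zone, r.Num, r.Status)
	}
}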

+ 0 - 118
vendor/cloud.google.com/go/datastore/client.go

@@ -1,118 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"fmt"
-
-	gax "github.com/googleapis/gax-go"
-
-	"cloud.google.com/go/internal"
-	"cloud.google.com/go/internal/version"
-	"golang.org/x/net/context"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/status"
-)
-
-// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
-// metadata to be sent in each request for server-side traffic management.
-type datastoreClient struct {
-	// Embed so we still implement the DatastoreClient interface,
-	// if the interface adds more methods.
-	pb.DatastoreClient
-
-	c  pb.DatastoreClient
-	md metadata.MD
-}
-
-func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
-	return &datastoreClient{
-		c: pb.NewDatastoreClient(conn),
-		md: metadata.Pairs(
-			resourcePrefixHeader, "projects/"+projectID,
-			"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
-	}
-}
-
-func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.Lookup(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.RunQuery(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.BeginTransaction(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.Commit(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.Rollback(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) {
-	err = dc.invoke(ctx, func(ctx context.Context) error {
-		res, err = dc.c.AllocateIds(ctx, in, opts...)
-		return err
-	})
-	return res, err
-}
-
-func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error {
-	ctx = metadata.NewOutgoingContext(ctx, dc.md)
-	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
-		err = f(ctx)
-		return !shouldRetry(err), err
-	})
-}
-
-func shouldRetry(err error) bool {
-	if err == nil {
-		return false
-	}
-	s, ok := status.FromError(err)
-	if !ok {
-		return false
-	}
-	// See https://cloud.google.com/datastore/docs/concepts/errors.
-	return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded
-}
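
The retry policy above hinges on shouldRetry: per the linked Datastore error
documentation, only Unavailable and DeadlineExceeded gRPC codes are retried,
and everything else fails fast. A self-contained sketch of that predicate
(logic copied from the removed file; the sample errors are illustrative):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// shouldRetry mirrors the predicate in the removed client.go.
func shouldRetry(err error) bool {
	if err == nil {
		return false
	}
	s, ok := status.FromError(err)
	if !ok {
		return false // not a gRPC status error
	}
	return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded
}

func main() {
	fmt.Println(shouldRetry(status.Error(codes.Unavailable, "try again"))) // true
	fmt.Println(shouldRetry(status.Error(codes.NotFound, "missing")))      // false
	fmt.Println(shouldRetry(errors.New("plain error")))                    // false
}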

+ 0 - 574
vendor/cloud.google.com/go/datastore/datastore.go

@@ -1,574 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"errors"
-	"fmt"
-	"log"
-	"os"
-	"reflect"
-
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	gtransport "google.golang.org/api/transport/grpc"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-	"google.golang.org/grpc"
-)
-
-const (
-	prodAddr  = "datastore.googleapis.com:443"
-	userAgent = "gcloud-golang-datastore/20160401"
-)
-
-// ScopeDatastore grants permissions to view and/or manage datastore entities
-const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
-
-// resourcePrefixHeader is the name of the metadata header used to indicate
-// the resource being operated on.
-const resourcePrefixHeader = "google-cloud-resource-prefix"
-
-// Client is a client for reading and writing data in a datastore dataset.
-type Client struct {
-	conn     *grpc.ClientConn
-	client   pb.DatastoreClient
-	endpoint string
-	dataset  string // Called dataset by the datastore API, synonym for project ID.
-}
-
-// NewClient creates a new Client for a given dataset.
-// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
-// If the DATASTORE_EMULATOR_HOST environment variable is set, the client will use its value
-// to connect to a locally-running datastore emulator.
-func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
-	var o []option.ClientOption
-	// Environment variables for gcd emulator:
-	// https://cloud.google.com/datastore/docs/tools/datastore-emulator
-	// If the emulator is available, dial it directly (and don't pass any credentials).
-	if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
-		conn, err := grpc.Dial(addr, grpc.WithInsecure())
-		if err != nil {
-			return nil, fmt.Errorf("grpc.Dial: %v", err)
-		}
-		o = []option.ClientOption{option.WithGRPCConn(conn)}
-	} else {
-		o = []option.ClientOption{
-			option.WithEndpoint(prodAddr),
-			option.WithScopes(ScopeDatastore),
-			option.WithUserAgent(userAgent),
-		}
-	}
-	// Warn if we see the legacy emulator environment variables.
-	if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
-		log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
-	}
-	if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
-		log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
-	}
-	if projectID == "" {
-		projectID = os.Getenv("DATASTORE_PROJECT_ID")
-	}
-	if projectID == "" {
-		return nil, errors.New("datastore: missing project/dataset id")
-	}
-	o = append(o, opts...)
-	conn, err := gtransport.Dial(ctx, o...)
-	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
-	}
-	return &Client{
-		conn:    conn,
-		client:  newDatastoreClient(conn, projectID),
-		dataset: projectID,
-	}, nil
-}
-
-var (
-	// ErrInvalidEntityType is returned when functions like Get or Next are
-	// passed a dst or src argument of invalid type.
-	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
-	// ErrInvalidKey is returned when an invalid key is presented.
-	ErrInvalidKey = errors.New("datastore: invalid key")
-	// ErrNoSuchEntity is returned when no entity was found for a given key.
-	ErrNoSuchEntity = errors.New("datastore: no such entity")
-)
-
-type multiArgType int
-
-const (
-	multiArgTypeInvalid multiArgType = iota
-	multiArgTypePropertyLoadSaver
-	multiArgTypeStruct
-	multiArgTypeStructPtr
-	multiArgTypeInterface
-)
-
-// ErrFieldMismatch is returned when a field is to be loaded into a different
-// type than the one it was stored from, or when a field is missing or
-// unexported in the destination struct.
-// StructType is the type of the struct pointed to by the destination argument
-// passed to Get or to Iterator.Next.
-type ErrFieldMismatch struct {
-	StructType reflect.Type
-	FieldName  string
-	Reason     string
-}
-
-func (e *ErrFieldMismatch) Error() string {
-	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
-		e.FieldName, e.StructType, e.Reason)
-}
-
-// GeoPoint represents a location as latitude/longitude in degrees.
-type GeoPoint struct {
-	Lat, Lng float64
-}
-
-// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
-func (g GeoPoint) Valid() bool {
-	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
-}
-
-func keyToProto(k *Key) *pb.Key {
-	if k == nil {
-		return nil
-	}
-
-	var path []*pb.Key_PathElement
-	for {
-		el := &pb.Key_PathElement{Kind: k.Kind}
-		if k.ID != 0 {
-			el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
-		} else if k.Name != "" {
-			el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
-		}
-		path = append(path, el)
-		if k.Parent == nil {
-			break
-		}
-		k = k.Parent
-	}
-
-	// The path should be in order [grandparent, parent, child].
-	// We built it backward above, so reverse it.
-	for i := 0; i < len(path)/2; i++ {
-		path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
-	}
-
-	key := &pb.Key{Path: path}
-	if k.Namespace != "" {
-		key.PartitionId = &pb.PartitionId{
-			NamespaceId: k.Namespace,
-		}
-	}
-	return key
-}
-
-// protoToKey decodes a protocol buffer representation of a key into an
-// equivalent *Key object. If the key is invalid, protoToKey will return the
-// invalid key along with ErrInvalidKey.
-func protoToKey(p *pb.Key) (*Key, error) {
-	var key *Key
-	var namespace string
-	if partition := p.PartitionId; partition != nil {
-		namespace = partition.NamespaceId
-	}
-	for _, el := range p.Path {
-		key = &Key{
-			Namespace: namespace,
-			Kind:      el.Kind,
-			ID:        el.GetId(),
-			Name:      el.GetName(),
-			Parent:    key,
-		}
-	}
-	if !key.valid() { // Also detects key == nil.
-		return key, ErrInvalidKey
-	}
-	return key, nil
-}
-
-// multiKeyToProto is a batch version of keyToProto.
-func multiKeyToProto(keys []*Key) []*pb.Key {
-	ret := make([]*pb.Key, len(keys))
-	for i, k := range keys {
-		ret[i] = keyToProto(k)
-	}
-	return ret
-}
-
-// multiProtoToKey is a batch version of protoToKey.
-func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
-	hasErr := false
-	ret := make([]*Key, len(keys))
-	err := make(MultiError, len(keys))
-	for i, k := range keys {
-		ret[i], err[i] = protoToKey(k)
-		if err[i] != nil {
-			hasErr = true
-		}
-	}
-	if hasErr {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// multiValid is a batch version of Key.valid. It returns an error, not a
-// []bool.
-func multiValid(key []*Key) error {
-	invalid := false
-	for _, k := range key {
-		if !k.valid() {
-			invalid = true
-			break
-		}
-	}
-	if !invalid {
-		return nil
-	}
-	err := make(MultiError, len(key))
-	for i, k := range key {
-		if !k.valid() {
-			err[i] = ErrInvalidKey
-		}
-	}
-	return err
-}
-
-// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
-// type S, for some interface type I, or some non-interface non-pointer type P
-// such that P or *P implements PropertyLoadSaver.
-//
-// It returns what category the slice's elements are, and the reflect.Type
-// that represents S, I or P.
-//
-// As a special case, PropertyList is an invalid type for v.
-//
-// TODO(djd): multiArg is very confusing. Fold this logic into the
-// relevant Put/Get methods to make the logic less opaque.
-func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
-	if v.Kind() != reflect.Slice {
-		return multiArgTypeInvalid, nil
-	}
-	if v.Type() == typeOfPropertyList {
-		return multiArgTypeInvalid, nil
-	}
-	elemType = v.Type().Elem()
-	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
-		return multiArgTypePropertyLoadSaver, elemType
-	}
-	switch elemType.Kind() {
-	case reflect.Struct:
-		return multiArgTypeStruct, elemType
-	case reflect.Interface:
-		return multiArgTypeInterface, elemType
-	case reflect.Ptr:
-		elemType = elemType.Elem()
-		if elemType.Kind() == reflect.Struct {
-			return multiArgTypeStructPtr, elemType
-		}
-	}
-	return multiArgTypeInvalid, nil
-}
-
-// Close closes the Client.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// Get loads the entity stored for key into dst, which must be a struct pointer
-// or implement PropertyLoadSaver. If there is no such entity for the key, Get
-// returns ErrNoSuchEntity.
-//
-// The values of dst's unmatched struct fields are not modified, and matching
-// slice-typed fields are not reset before appending to them. In particular, it
-// is recommended to pass a pointer to a zero valued struct on each Get call.
-//
-// ErrFieldMismatch is returned when a field is to be loaded into a different
-// type than the one it was stored from, or when a field is missing or
-// unexported in the destination struct. ErrFieldMismatch is only returned if
-// dst is a struct pointer.
-func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
-	if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
-		return ErrInvalidEntityType
-	}
-	err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
-	if me, ok := err.(MultiError); ok {
-		return me[0]
-	}
-	return err
-}
-
-// GetMulti is a batch version of Get.
-//
-// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
-// type I, or some non-interface non-pointer type P such that P or *P
-// implements PropertyLoadSaver. If an []I, each element must be a valid dst
-// for Get: it must be a struct pointer or implement PropertyLoadSaver.
-//
-// As a special case, PropertyList is an invalid type for dst, even though a
-// PropertyList is a slice of structs. It is treated as invalid to avoid being
-// mistakenly passed when []PropertyList was intended.
-func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
-	return c.get(ctx, keys, dst, nil)
-}
-
-func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
-	v := reflect.ValueOf(dst)
-	multiArgType, _ := checkMultiArg(v)
-
-	// Sanity checks
-	if multiArgType == multiArgTypeInvalid {
-		return errors.New("datastore: dst has invalid type")
-	}
-	if len(keys) != v.Len() {
-		return errors.New("datastore: keys and dst slices have different length")
-	}
-	if len(keys) == 0 {
-		return nil
-	}
-
-	// Go through the keys, validate them, serialize them, and build a map from each key to its indices.
-	// Equal keys are deduped.
-	multiErr, any := make(MultiError, len(keys)), false
-	keyMap := make(map[string][]int, len(keys))
-	pbKeys := make([]*pb.Key, 0, len(keys))
-	for i, k := range keys {
-		if !k.valid() {
-			multiErr[i] = ErrInvalidKey
-			any = true
-		} else {
-			ks := k.String()
-			if _, ok := keyMap[ks]; !ok {
-				pbKeys = append(pbKeys, keyToProto(k))
-			}
-			keyMap[ks] = append(keyMap[ks], i)
-		}
-	}
-	if any {
-		return multiErr
-	}
-	req := &pb.LookupRequest{
-		ProjectId:   c.dataset,
-		Keys:        pbKeys,
-		ReadOptions: opts,
-	}
-	resp, err := c.client.Lookup(ctx, req)
-	if err != nil {
-		return err
-	}
-	found := resp.Found
-	missing := resp.Missing
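-	// The backend may report some keys as Deferred rather than returning
-	// their entities when the response would otherwise grow too large; the
-	// loop below re-issues the Lookup for those keys.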
-	// Bound the loop at 100 iterations to prevent an infinite loop.
-	// The bound follows from the service limits: a single request can ask
-	// for at most 1,000 entities, an entity is at most 1 MiB, and the max
-	// request size is 10 MiB, so we assume the max response size is also
-	// 10 MiB. Each full response thus holds at least 10 entities, and
-	// 1,000 / 10 = 100 iterations suffice.
-	// Note that if ctx has a deadline, the deadline will probably
-	// be hit before we reach 100 iterations.
-	for i := 0; len(resp.Deferred) > 0 && i < 100; i++ {
-		req.Keys = resp.Deferred
-		resp, err = c.client.Lookup(ctx, req)
-		if err != nil {
-			return err
-		}
-		found = append(found, resp.Found...)
-		missing = append(missing, resp.Missing...)
-	}
-
-	filled := 0
-	for _, e := range found {
-		k, err := protoToKey(e.Entity.Key)
-		if err != nil {
-			return errors.New("datastore: internal error: server returned an invalid key")
-		}
-		filled += len(keyMap[k.String()])
-		for _, index := range keyMap[k.String()] {
-			elem := v.Index(index)
-			if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
-				elem = elem.Addr()
-			}
-			if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
-				elem.Set(reflect.New(elem.Type().Elem()))
-			}
-			if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
-				multiErr[index] = err
-				any = true
-			}
-		}
-	}
-	for _, e := range missing {
-		k, err := protoToKey(e.Entity.Key)
-		if err != nil {
-			return errors.New("datastore: internal error: server returned an invalid key")
-		}
-		filled += len(keyMap[k.String()])
-		for _, index := range keyMap[k.String()] {
-			multiErr[index] = ErrNoSuchEntity
-		}
-		any = true
-	}
-
-	if filled != len(keys) {
-		return errors.New("datastore: internal error: server returned the wrong number of entities")
-	}
-
-	if any {
-		return multiErr
-	}
-	return nil
-}
-
-// Put saves the entity src into the datastore with key k. src must be a struct
-// pointer or implement PropertyLoadSaver; if a struct pointer then any
-// unexported fields of that struct will be skipped. If k is an incomplete key,
-// the returned key will be a unique key generated by the datastore.
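-//
-// For example (a sketch; client, "Task" and task are illustrative):
-//
-//	key, err := client.Put(ctx, datastore.IncompleteKey("Task", nil), &task)
-//
-// returns a key with a datastore-assigned ID when err is nil.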
-func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
-	k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
-	if err != nil {
-		if me, ok := err.(MultiError); ok {
-			return nil, me[0]
-		}
-		return nil, err
-	}
-	return k[0], nil
-}
-
-// PutMulti is a batch version of Put.
-//
-// src must satisfy the same conditions as the dst argument to GetMulti.
-func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
-	mutations, err := putMutations(keys, src)
-	if err != nil {
-		return nil, err
-	}
-
-	// Make the request.
-	req := &pb.CommitRequest{
-		ProjectId: c.dataset,
-		Mutations: mutations,
-		Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
-	}
-	resp, err := c.client.Commit(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	// Copy any newly minted keys into the returned keys.
-	ret := make([]*Key, len(keys))
-	for i, key := range keys {
-		if key.Incomplete() {
-			// This key is in the mutation results.
-			ret[i], err = protoToKey(resp.MutationResults[i].Key)
-			if err != nil {
-				return nil, errors.New("datastore: internal error: server returned an invalid key")
-			}
-		} else {
-			ret[i] = key
-		}
-	}
-	return ret, nil
-}
-
-func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
-	v := reflect.ValueOf(src)
-	multiArgType, _ := checkMultiArg(v)
-	if multiArgType == multiArgTypeInvalid {
-		return nil, errors.New("datastore: src has invalid type")
-	}
-	if len(keys) != v.Len() {
-		return nil, errors.New("datastore: key and src slices have different length")
-	}
-	if len(keys) == 0 {
-		return nil, nil
-	}
-	if err := multiValid(keys); err != nil {
-		return nil, err
-	}
-	mutations := make([]*pb.Mutation, 0, len(keys))
-	multiErr := make(MultiError, len(keys))
-	hasErr := false
-	for i, k := range keys {
-		elem := v.Index(i)
-		// Two cases where we need to take the address:
-		// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
-		// 2) multiArgTypeStruct => saveEntity needs *struct
-		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
-			elem = elem.Addr()
-		}
-		p, err := saveEntity(k, elem.Interface())
-		if err != nil {
-			multiErr[i] = err
-			hasErr = true
-		}
-		var mut *pb.Mutation
-		if k.Incomplete() {
-			mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}
-		} else {
-			mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}
-		}
-		mutations = append(mutations, mut)
-	}
-	if hasErr {
-		return nil, multiErr
-	}
-	return mutations, nil
-}
-
-// Delete deletes the entity for the given key.
-func (c *Client) Delete(ctx context.Context, key *Key) error {
-	err := c.DeleteMulti(ctx, []*Key{key})
-	if me, ok := err.(MultiError); ok {
-		return me[0]
-	}
-	return err
-}
-
-// DeleteMulti is a batch version of Delete.
-func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
-	mutations, err := deleteMutations(keys)
-	if err != nil {
-		return err
-	}
-
-	req := &pb.CommitRequest{
-		ProjectId: c.dataset,
-		Mutations: mutations,
-		Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
-	}
-	_, err = c.client.Commit(ctx, req)
-	return err
-}
-
-func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
-	mutations := make([]*pb.Mutation, 0, len(keys))
-	set := make(map[string]bool, len(keys))
-	for _, k := range keys {
-		if k.Incomplete() {
-			return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
-		}
-		ks := k.String()
-		if !set[ks] {
-			mutations = append(mutations, &pb.Mutation{
-				Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
-			})
-		}
-		set[ks] = true
-	}
-	return mutations, nil
-}

+ 0 - 454
vendor/cloud.google.com/go/datastore/doc.go

@@ -1,454 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package datastore provides a client for Google Cloud Datastore.
-
-Note: This package is in beta. Some backwards-incompatible changes may occur.
-
-
-Basic Operations
-
-Entities are the unit of storage and are associated with a key. A key
-consists of an optional parent key, a string application ID, a string kind
-(also known as an entity type), and either a StringID or an IntID. A
-StringID is also known as an entity name or key name.
-
-It is valid to create a key with a zero StringID and a zero IntID; this is
-called an incomplete key, and does not refer to any saved entity. Putting an
-entity into the datastore under an incomplete key will cause a unique key
-to be generated for that entity, with a non-zero IntID.
-
-An entity's contents are a mapping from case-sensitive field names to values.
-Valid value types are:
-  - signed integers (int, int8, int16, int32 and int64),
-  - bool,
-  - string,
-  - float32 and float64,
-  - []byte (up to 1 megabyte in length),
-  - any type whose underlying type is one of the above predeclared types,
-  - *Key,
-  - GeoPoint,
-  - time.Time (stored with microsecond precision),
-  - structs whose fields are all valid value types,
-  - pointers to structs whose fields are all valid value types,
-  - slices of any of the above.
-
-Slices of structs are valid, as are structs that contain slices.
-
-The Get and Put functions load and save an entity's contents. An entity's
-contents are typically represented by a struct pointer.
-
-Example code:
-
-	type Entity struct {
-		Value string
-	}
-
-	func main() {
-		ctx := context.Background()
-
-		// Create a datastore client. In a typical application, you would create
-		// a single client which is reused for every datastore operation.
-		dsClient, err := datastore.NewClient(ctx, "my-project")
-		if err != nil {
-			// Handle error.
-		}
-
-		k := datastore.NameKey("Entity", "stringID", nil)
-		e := new(Entity)
-		if err := dsClient.Get(ctx, k, e); err != nil {
-			// Handle error.
-		}
-
-		old := e.Value
-		e.Value = "Hello World!"
-
-		if _, err := dsClient.Put(ctx, k, e); err != nil {
-			// Handle error.
-		}
-
-		fmt.Printf("Updated value from %q to %q\n", old, e.Value)
-	}
-
-GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
-Delete functions. They take a []*Key instead of a *Key, and may return a
-datastore.MultiError when encountering partial failure.
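-
-Example code, a minimal sketch of handling partial failure (reusing ctx,
-dsClient and the Entity type from the example above):
-
-	keys := []*datastore.Key{
-		datastore.NameKey("Entity", "a", nil),
-		datastore.NameKey("Entity", "b", nil),
-	}
-	entities := make([]Entity, len(keys))
-	if err := dsClient.GetMulti(ctx, keys, entities); err != nil {
-		if merr, ok := err.(datastore.MultiError); ok {
-			for i, e := range merr {
-				if e == datastore.ErrNoSuchEntity {
-					// No entity stored for keys[i].
-				} else if e != nil {
-					// Handle error for keys[i].
-				}
-			}
-		} else {
-			// Handle other error.
-		}
-	}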
-
-
-Properties
-
-An entity's contents can be represented by a variety of types. These are
-typically struct pointers, but can also be any type that implements the
-PropertyLoadSaver interface. If using a struct pointer, you do not have to
-explicitly implement the PropertyLoadSaver interface; the datastore will
-automatically convert via reflection. If a struct pointer does implement that
-interface then those methods will be used in preference to the default
-behavior for struct pointers. Struct pointers are more strongly typed and are
-easier to use; PropertyLoadSavers are more flexible.
-
-The actual types passed do not have to match between Get and Put calls or even
-across different calls to datastore. It is valid to put a *PropertyList and
-get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
-Conceptually, any entity is saved as a sequence of properties, and is loaded
-into the destination value on a property-by-property basis. When loading into
-a struct pointer, an entity that cannot be completely represented (such as a
-missing field) will result in an ErrFieldMismatch error but it is up to the
-caller whether this error is fatal, recoverable or ignorable.
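-
-Example code, a minimal sketch that treats ErrFieldMismatch as ignorable
-(reusing ctx, dsClient, k and the Entity type from the example above):
-
-	var e Entity
-	if err := dsClient.Get(ctx, k, &e); err != nil {
-		if _, ok := err.(*datastore.ErrFieldMismatch); !ok {
-			// Handle error; field mismatches are tolerated here.
-		}
-	}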
-
-By default, for struct pointers, all properties are potentially indexed, and
-the property name is the same as the field name (and hence must start with an
-upper case letter).
-
-Fields may have a `datastore:"name,options"` tag. The tag name is the
-property name, which must be one or more valid Go identifiers joined by ".",
-but may start with a lower case letter. An empty tag name means to just use the
-field name. A "-" tag name means that the datastore will ignore that field.
-
-The only valid options are "omitempty", "noindex" and "flatten".
-
-If the options include "omitempty" and the value of the field is empty,
-then the field will be omitted on Save. The empty values are false, 0, any
-nil interface value, and any array, slice, map, or string of length zero.
-Struct field values will never be empty.
-
-If options include "noindex" then the field will not be indexed. All fields are indexed
-by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
-fields used to store long strings and byte slices must be tagged with "noindex"
-or they will cause Put operations to fail.
-
-For a nested struct field, the options may also include "flatten". This indicates
-that the immediate fields and any nested substruct fields of the nested struct should be
-flattened. See below for examples.
-
-To use multiple options together, separate them by a comma.
-The order does not matter.
-
-If the options string is empty, the comma may be omitted.
-
-Example code:
-
-	// A and B are renamed to a and b.
-	// A, C and J are not indexed.
-	// D's tag is equivalent to having no tag at all (E).
-	// I is ignored entirely by the datastore.
-	// J has tag information for both the datastore and json packages.
-	type TaggedStruct struct {
-		A int `datastore:"a,noindex"`
-		B int `datastore:"b"`
-		C int `datastore:",noindex"`
-		D int `datastore:""`
-		E int
-		I int `datastore:"-"`
-		J int `datastore:",noindex" json:"j"`
-	}
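-
-As a further sketch (the types and names are illustrative), "omitempty" and
-"flatten" combine with renaming in the same way:
-
-	type Address struct {
-		City string
-	}
-
-	type OptionsExample struct {
-		// Omitted on Save when zero; stored under the name "count".
-		Count int `datastore:"count,omitempty"`
-		// Stored as the flattened, unindexed property "Home.City".
-		Home Address `datastore:",flatten,noindex"`
-	}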
-
-
-Key Field
-
-If the struct contains a *datastore.Key field tagged with the name "__key__",
-its value will be ignored on Put. When reading the Entity back into the Go struct,
-the field will be populated with the *datastore.Key value used to query for
-the Entity.
-
-Example code:
-
-	type MyEntity struct {
-		A int
-		K *datastore.Key `datastore:"__key__"`
-	}
-
-	k := datastore.NameKey("Entity", "stringID", nil)
-	e := MyEntity{A: 12}
-	k, err := dsClient.Put(ctx, k, &e)
-	if err != nil {
-		// Handle error.
-	}
-
-	var entities []MyEntity
-	q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1)
-	_, err = dsClient.GetAll(ctx, q, &entities)
-	if err != nil {
-		// Handle error.
-	}
-
-	log.Println(entities[0])
-	// Prints {12 /Entity,stringID}
-
-
-
-Structured Properties
-
-If the struct pointed to contains other structs, then the nested or embedded
-structs are themselves saved as Entity values. For example, given these definitions:
-
-	type Inner struct {
-		W int32
-		X string
-	}
-
-	type Outer struct {
-		I Inner
-	}
-
-then an Outer would have one property, "I", encoded as an Entity value.
-
-If an outer struct is tagged "noindex" then all of its implicit flattened
-fields are effectively "noindex".
-
-If the Inner struct contains a *Key field with the name "__key__", like so:
-
-	type Inner struct {
-		W int32
-		X string
-		K *datastore.Key `datastore:"__key__"`
-	}
-
-	type Outer struct {
-		I Inner
-	}
-
-then the value of K will be used as the Key for Inner, represented
-as an Entity value in datastore.
-
-If any nested struct fields should be flattened, instead of encoded as
-Entity values, the nested struct field should be tagged with the "flatten"
-option. For example, given the following:
-
-	type Inner1 struct {
-		W int32
-		X string
-	}
-
-	type Inner2 struct {
-		Y float64
-	}
-
-	type Inner3 struct {
-		Z bool
-	}
-
-	type Inner4 struct {
-		WW int
-	}
-
-	type Inner5 struct {
-		X Inner4
-	}
-
-	type Outer struct {
-		A int16
-		I []Inner1 `datastore:",flatten"`
-		J Inner2   `datastore:",flatten"`
-		K Inner5   `datastore:",flatten"`
-		Inner3     `datastore:",flatten"`
-	}
-
-an Outer's properties would be equivalent to those of:
-
-	type OuterEquivalent struct {
-		A          int16
-		IDotW      []int32  `datastore:"I.W"`
-		IDotX      []string `datastore:"I.X"`
-		JDotY      float64  `datastore:"J.Y"`
-		KDotXDotWW int      `datastore:"K.X.WW"`
-		Z          bool
-	}
-
-Note that the "flatten" option cannot be used for Entity value fields.
-The server will reject any dotted field names for an Entity value.
-
-
-The PropertyLoadSaver Interface
-
-An entity's contents can also be represented by any type that implements the
-PropertyLoadSaver interface. This type may be a struct pointer, but it does
-not have to be. The datastore package will call Load when getting the entity's
-contents, and Save when putting the entity's contents.
-Possible uses include deriving non-stored fields, verifying fields, or indexing
-a field only if its value is positive.
-
-Example code:
-
-	type CustomPropsExample struct {
-		I, J int
-		// Sum is not stored, but should always be equal to I + J.
-		Sum int `datastore:"-"`
-	}
-
-	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
-		// Load I and J as usual.
-		if err := datastore.LoadStruct(x, ps); err != nil {
-			return err
-		}
-		// Derive the Sum field.
-		x.Sum = x.I + x.J
-		return nil
-	}
-
-	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
-		// Validate the Sum field.
-		if x.Sum != x.I+x.J {
-			return nil, errors.New("CustomPropsExample has inconsistent sum")
-		}
-		// Save I and J as usual. The code below is equivalent to calling
-		// "return datastore.SaveStruct(x)", but is done manually for
-		// demonstration purposes.
-		return []datastore.Property{
-			{
-				Name:  "I",
-				Value: int64(x.I),
-			},
-			{
-				Name:  "J",
-				Value: int64(x.J),
-			},
-		}, nil
-	}
-
-The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
-arbitrary entity's contents.
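-
-Example code, a minimal sketch of reading an entity of unknown shape
-(reusing ctx, dsClient and k from the examples above):
-
-	var props datastore.PropertyList
-	if err := dsClient.Get(ctx, k, &props); err != nil {
-		// Handle error.
-	}
-	for _, p := range props {
-		fmt.Printf("%s = %v\n", p.Name, p.Value)
-	}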
-
-The KeyLoader Interface
-
-If a type implements the PropertyLoadSaver interface, it may
-also want to implement the KeyLoader interface.
-The KeyLoader interface exists to allow implementations of PropertyLoadSaver
-to also load an Entity's Key into the Go type. This type may be a struct
-pointer, but it does not have to be. The datastore package will call LoadKey
-when getting the entity's contents, after calling Load.
-
-Example code:
-
-	type WithKeyExample struct {
-		I   int
-		Key *datastore.Key
-	}
-
-	func (x *WithKeyExample) LoadKey(k *datastore.Key) error {
-		x.Key = k
-		return nil
-	}
-
-	func (x *WithKeyExample) Load(ps []datastore.Property) error {
-		// Load I as usual.
-		return datastore.LoadStruct(x, ps)
-	}
-
-	func (x *WithKeyExample) Save() ([]datastore.Property, error) {
-		// Save I as usual.
-		return datastore.SaveStruct(x)
-	}
-
-To load a Key into a struct which does not implement the PropertyLoadSaver
-interface, see the "Key Field" section above.
-
-
-Queries
-
-Queries retrieve entities based on their properties or key's ancestry. Running
-a query yields an iterator of results: either keys or (key, entity) pairs.
-Queries are re-usable and it is safe to call Query.Run from concurrent
-goroutines. Iterators are not safe for concurrent use.
-
-Queries are immutable, and are either created by calling NewQuery, or derived
-from an existing query by calling a method like Filter or Order that returns a
-new query value. A query is typically constructed by calling NewQuery followed
-by a chain of zero or more such methods. These methods are:
-  - Ancestor and Filter constrain the entities returned by running a query.
-  - Order affects the order in which they are returned.
-  - Project constrains the fields returned.
-  - Distinct de-duplicates projected entities.
-  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
-  - Start, End, Offset and Limit define which sub-sequence of matching entities
-    to return. Start and End take cursors, Offset and Limit take integers. Start
-    and Offset affect the first result, End and Limit affect the last result.
-    If both Start and Offset are set, then the offset is relative to Start.
-    If both End and Limit are set, then the earliest constraint wins. Limit is
-    relative to Start+Offset, not relative to End. As a special case, a
-    negative limit means unlimited.
-
-Example code:
-
-	type Widget struct {
-		Description string
-		Price       int
-	}
-
-	func printWidgets(ctx context.Context, client *datastore.Client) {
-		q := datastore.NewQuery("Widget").
-			Filter("Price <", 1000).
-			Order("-Price")
-		for t := client.Run(ctx, q); ; {
-			var x Widget
-			key, err := t.Next(&x)
-			if err == iterator.Done {
-				break
-			}
-			if err != nil {
-				// Handle error.
-			}
-			fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x)
-		}
-	}
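-
-A keys-only variant of the query above is a minimal sketch of retrieving
-keys without entities; Next then takes a nil destination:
-
-	func printWidgetKeys(ctx context.Context, client *datastore.Client) {
-		q := datastore.NewQuery("Widget").Filter("Price <", 1000).KeysOnly()
-		for t := client.Run(ctx, q); ; {
-			key, err := t.Next(nil)
-			if err == iterator.Done {
-				break
-			}
-			if err != nil {
-				// Handle error.
-			}
-			fmt.Println(key)
-		}
-	}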
-
-
-Transactions
-
-Client.RunInTransaction runs a function in a transaction.
-
-Example code:
-
-	type Counter struct {
-		Count int
-	}
-
-	func incCount(ctx context.Context, client *datastore.Client) {
-		var count int
-		key := datastore.NameKey("Counter", "singleton", nil)
-		_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
-			var x Counter
-			if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
-				return err
-			}
-			x.Count++
-			if _, err := tx.Put(key, &x); err != nil {
-				return err
-			}
-			count = x.Count
-			return nil
-		})
-		if err != nil {
-			// Handle error.
-		}
-		// The value of count is only valid once the transaction is successful
-		// (RunInTransaction has returned nil).
-		fmt.Printf("Count=%d\n", count)
-	}
-
-Google Cloud Datastore Emulator
-
-This package supports the Cloud Datastore emulator, which is useful for testing and
-development. Environment variables are used to indicate that datastore traffic should be
-directed to the emulator instead of the production Datastore service.
-
-To install and set up the emulator and its environment variables, see the documentation
-at https://cloud.google.com/datastore/docs/tools/datastore-emulator.
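-
-For example (a sketch; consult the page above for the authoritative set of
-variables), a test can point the client at a local emulator before creating
-it:
-
-	os.Setenv("DATASTORE_EMULATOR_HOST", "localhost:8081")
-	dsClient, err := datastore.NewClient(ctx, "my-project")
-	if err != nil {
-		// Handle error.
-	}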
-
-Authentication
-
-See examples of authorization and authentication at
-https://godoc.org/cloud.google.com/go#pkg-examples.
-
-*/
-package datastore // import "cloud.google.com/go/datastore"

+ 0 - 47
vendor/cloud.google.com/go/datastore/errors.go

@@ -1,47 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file provides error functions for common API failure modes.
-
-package datastore
-
-import (
-	"fmt"
-)
-
-// MultiError is returned by batch operations when there are errors with
-// particular elements. Errors will be in a one-to-one correspondence with
-// the input elements; successful elements will have a nil entry.
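-//
-// For example, if exactly two elements fail, Error reports the first
-// failure as "<first error> (and 1 other error)".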
-type MultiError []error
-
-func (m MultiError) Error() string {
-	s, n := "", 0
-	for _, e := range m {
-		if e != nil {
-			if n == 0 {
-				s = e.Error()
-			}
-			n++
-		}
-	}
-	switch n {
-	case 0:
-		return "(0 errors)"
-	case 1:
-		return s
-	case 2:
-		return s + " (and 1 other error)"
-	}
-	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
-}

+ 0 - 280
vendor/cloud.google.com/go/datastore/key.go

@@ -1,280 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"bytes"
-	"encoding/base64"
-	"encoding/gob"
-	"errors"
-	"strconv"
-	"strings"
-
-	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-)
-
-// Key represents the datastore key for a stored entity.
-type Key struct {
-	// Kind cannot be empty.
-	Kind string
-	// Either ID or Name must be zero for the Key to be valid.
-	// If both are zero, the Key is incomplete.
-	ID   int64
-	Name string
-	// Parent must either be a complete Key or nil.
-	Parent *Key
-
-	// Namespace provides the ability to partition your data for multiple
-	// tenants. In most cases, it is not necessary to specify a namespace.
-	// See docs on datastore multitenancy for details:
-	// https://cloud.google.com/datastore/docs/concepts/multitenancy
-	Namespace string
-}
-
-// Incomplete reports whether the key does not refer to a stored entity.
-func (k *Key) Incomplete() bool {
-	return k.Name == "" && k.ID == 0
-}
-
-// valid returns whether the key is valid.
-func (k *Key) valid() bool {
-	if k == nil {
-		return false
-	}
-	for ; k != nil; k = k.Parent {
-		if k.Kind == "" {
-			return false
-		}
-		if k.Name != "" && k.ID != 0 {
-			return false
-		}
-		if k.Parent != nil {
-			if k.Parent.Incomplete() {
-				return false
-			}
-			if k.Parent.Namespace != k.Namespace {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// Equal reports whether two keys are equal. Two keys are equal if they are
-// both nil, or if their kinds, IDs, names, namespaces and parents are equal.
-func (k *Key) Equal(o *Key) bool {
-	for {
-		if k == nil || o == nil {
-			return k == o // if either is nil, both must be nil
-		}
-		if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind {
-			return false
-		}
-		if k.Parent == nil && o.Parent == nil {
-			return true
-		}
-		k = k.Parent
-		o = o.Parent
-	}
-}
-
-// marshal marshals the key's string representation to the buffer.
-func (k *Key) marshal(b *bytes.Buffer) {
-	if k.Parent != nil {
-		k.Parent.marshal(b)
-	}
-	b.WriteByte('/')
-	b.WriteString(k.Kind)
-	b.WriteByte(',')
-	if k.Name != "" {
-		b.WriteString(k.Name)
-	} else {
-		b.WriteString(strconv.FormatInt(k.ID, 10))
-	}
-}
-
-// String returns a string representation of the key.
-func (k *Key) String() string {
-	if k == nil {
-		return ""
-	}
-	b := bytes.NewBuffer(make([]byte, 0, 512))
-	k.marshal(b)
-	return b.String()
-}
-
-// Note: fields are not renamed compared to the appengine gobKey struct.
-// This ensures gobs created by appengine can be read here, and vice versa.
-type gobKey struct {
-	Kind      string
-	StringID  string
-	IntID     int64
-	Parent    *gobKey
-	AppID     string
-	Namespace string
-}
-
-func keyToGobKey(k *Key) *gobKey {
-	if k == nil {
-		return nil
-	}
-	return &gobKey{
-		Kind:      k.Kind,
-		StringID:  k.Name,
-		IntID:     k.ID,
-		Parent:    keyToGobKey(k.Parent),
-		Namespace: k.Namespace,
-	}
-}
-
-func gobKeyToKey(gk *gobKey) *Key {
-	if gk == nil {
-		return nil
-	}
-	return &Key{
-		Kind:      gk.Kind,
-		Name:      gk.StringID,
-		ID:        gk.IntID,
-		Parent:    gobKeyToKey(gk.Parent),
-		Namespace: gk.Namespace,
-	}
-}
-
-// GobEncode marshals the key into a sequence of bytes
-// using an encoding/gob.Encoder.
-func (k *Key) GobEncode() ([]byte, error) {
-	buf := new(bytes.Buffer)
-	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder.
-func (k *Key) GobDecode(buf []byte) error {
-	gk := new(gobKey)
-	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
-		return err
-	}
-	*k = *gobKeyToKey(gk)
-	return nil
-}
-
-// MarshalJSON marshals the key into JSON.
-func (k *Key) MarshalJSON() ([]byte, error) {
-	return []byte(`"` + k.Encode() + `"`), nil
-}
-
-// UnmarshalJSON unmarshals a key JSON object into a Key.
-func (k *Key) UnmarshalJSON(buf []byte) error {
-	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
-		return errors.New("datastore: bad JSON key")
-	}
-	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
-	if err != nil {
-		return err
-	}
-	*k = *k2
-	return nil
-}
-
-// Encode returns an opaque representation of the key
-// suitable for use in HTML and URLs.
-// This is compatible with the Python and Java runtimes.
-func (k *Key) Encode() string {
-	pKey := keyToProto(k)
-
-	b, err := proto.Marshal(pKey)
-	if err != nil {
-		panic(err)
-	}
-
-	// Trailing padding is stripped.
-	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
-}
-
-// DecodeKey decodes a key from the opaque representation returned by Encode.
-func DecodeKey(encoded string) (*Key, error) {
-	// Re-add padding.
-	if m := len(encoded) % 4; m != 0 {
-		encoded += strings.Repeat("=", 4-m)
-	}
-
-	b, err := base64.URLEncoding.DecodeString(encoded)
-	if err != nil {
-		return nil, err
-	}
-
-	pKey := new(pb.Key)
-	if err := proto.Unmarshal(b, pKey); err != nil {
-		return nil, err
-	}
-	return protoToKey(pKey)
-}
-
-// AllocateIDs accepts a slice of incomplete keys and returns a
-// slice of complete keys that are guaranteed to be valid in the datastore.
-func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {
-	if keys == nil {
-		return nil, nil
-	}
-
-	req := &pb.AllocateIdsRequest{
-		ProjectId: c.dataset,
-		Keys:      multiKeyToProto(keys),
-	}
-	resp, err := c.client.AllocateIds(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	return multiProtoToKey(resp.Keys)
-}
-
-// IncompleteKey creates a new incomplete key.
-// The supplied kind cannot be empty.
-// The namespace of the new key is empty.
-func IncompleteKey(kind string, parent *Key) *Key {
-	return &Key{
-		Kind:   kind,
-		Parent: parent,
-	}
-}
-
-// NameKey creates a new key with a name.
-// The supplied kind cannot be empty.
-// The supplied parent must either be a complete key or nil.
-// The namespace of the new key is empty.
-func NameKey(kind, name string, parent *Key) *Key {
-	return &Key{
-		Kind:   kind,
-		Name:   name,
-		Parent: parent,
-	}
-}
-
-// IDKey creates a new key with an ID.
-// The supplied kind cannot be empty.
-// The supplied parent must either be a complete key or nil.
-// The namespace of the new key is empty.
-func IDKey(kind string, id int64, parent *Key) *Key {
-	return &Key{
-		Kind:   kind,
-		ID:     id,
-		Parent: parent,
-	}
-}

+ 0 - 491
vendor/cloud.google.com/go/datastore/load.go

@@ -1,491 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"time"
-
-	"cloud.google.com/go/internal/fields"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-)
-
-var (
-	typeOfByteSlice = reflect.TypeOf([]byte(nil))
-	typeOfTime      = reflect.TypeOf(time.Time{})
-	typeOfGeoPoint  = reflect.TypeOf(GeoPoint{})
-	typeOfKeyPtr    = reflect.TypeOf(&Key{})
-	typeOfEntityPtr = reflect.TypeOf(&Entity{})
-)
-
-// typeMismatchReason returns a string explaining why the property p could not
-// be stored in an entity field of type v.Type().
-func typeMismatchReason(p Property, v reflect.Value) string {
-	entityType := "empty"
-	switch p.Value.(type) {
-	case int64:
-		entityType = "int"
-	case bool:
-		entityType = "bool"
-	case string:
-		entityType = "string"
-	case float64:
-		entityType = "float"
-	case *Key:
-		entityType = "*datastore.Key"
-	case *Entity:
-		entityType = "*datastore.Entity"
-	case GeoPoint:
-		entityType = "GeoPoint"
-	case time.Time:
-		entityType = "time.Time"
-	case []byte:
-		entityType = "[]byte"
-	}
-
-	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
-}
-
-type propertyLoader struct {
-	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
-	// been seen so far. The map is constructed lazily.
-	m map[string]int
-}
-
-func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
-	sl, ok := p.Value.([]interface{})
-	if !ok {
-		return l.loadOneElement(codec, structValue, p, prev)
-	}
-	for _, val := range sl {
-		p.Value = val
-		if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" {
-			return errStr
-		}
-	}
-	return ""
-}
-
-// loadOneElement loads the value of Property p into structValue based on the provided
-// codec. codec is used to find the field in structValue into which p should be loaded.
-// prev is the set of property names already seen for structValue.
-func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
-	var sliceOk bool
-	var sliceIndex int
-	var v reflect.Value
-
-	name := p.Name
-	fieldNames := strings.Split(name, ".")
-
-	for len(fieldNames) > 0 {
-		var field *fields.Field
-
-		// Try to find a field matching the full remaining name first. If
-		// there is no match, drop the last "."-delimited component and
-		// try again: e.g. for the name "A.B.C.D", try "A.B.C.D", then
-		// "A.B.C", then "A.B", and finally "A".
-		for i := len(fieldNames); i > 0; i-- {
-			parent := strings.Join(fieldNames[:i], ".")
-			field = codec.Match(parent)
-			if field != nil {
-				fieldNames = fieldNames[i:]
-				break
-			}
-		}
-
-		// If we never found a matching field in the codec, return
-		// error message.
-		if field == nil {
-			return "no such struct field"
-		}
-
-		v = initField(structValue, field.Index)
-		if !v.IsValid() {
-			return "no such struct field"
-		}
-		if !v.CanSet() {
-			return "cannot set struct field"
-		}
-
-		// If field implements PLS, we delegate loading to the PLS's Load early,
-		// and stop iterating through fields.
-		ok, err := plsFieldLoad(v, p, fieldNames)
-		if err != nil {
-			return err.Error()
-		}
-		if ok {
-			return ""
-		}
-
-		if field.Type.Kind() == reflect.Struct {
-			codec, err = structCache.Fields(field.Type)
-			if err != nil {
-				return err.Error()
-			}
-			structValue = v
-		}
-
-		// If the element is a slice, we need to accommodate it.
-		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
-			if l.m == nil {
-				l.m = make(map[string]int)
-			}
-			sliceIndex = l.m[p.Name]
-			l.m[p.Name] = sliceIndex + 1
-			for v.Len() <= sliceIndex {
-				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
-			}
-			structValue = v.Index(sliceIndex)
-
-			// If structValue implements PLS, we delegate loading to the PLS's
-			// Load early, and stop iterating through fields.
-			ok, err := plsFieldLoad(structValue, p, fieldNames)
-			if err != nil {
-				return err.Error()
-			}
-			if ok {
-				return ""
-			}
-
-			if structValue.Type().Kind() == reflect.Struct {
-				codec, err = structCache.Fields(structValue.Type())
-				if err != nil {
-					return err.Error()
-				}
-			}
-			sliceOk = true
-		}
-	}
-
-	var slice reflect.Value
-	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
-		slice = v
-		v = reflect.New(v.Type().Elem()).Elem()
-	} else if _, ok := prev[p.Name]; ok && !sliceOk {
-		// The property is multiple-valued, but the field is not a slice,
-		// so zero out the value set previously and report the mismatch.
-		v.Set(reflect.Zero(v.Type()))
-		return "multiple-valued property requires a slice field type"
-	}
-
-	prev[p.Name] = struct{}{}
-
-	if errReason := setVal(v, p); errReason != "" {
-		// Set the slice back to its zero value.
-		if slice.IsValid() {
-			slice.Set(reflect.Zero(slice.Type()))
-		}
-		return errReason
-	}
-
-	if slice.IsValid() {
-		slice.Index(sliceIndex).Set(v)
-	}
-
-	return ""
-}
-
-// plsFieldLoad first tries to convert v's value to a PLS, then v's addressed
-// value to a PLS. If neither succeeds, plsFieldLoad returns false for the
-// first return value. Otherwise, the first return value will be true.
-// If v is successfully converted to a PLS, plsFieldLoad will then try to Load
-// the property p into v (by way of the PLS's Load method).
-//
-// If the field v has been flattened, the Property's name must be altered
-// before calling Load to reflect the field v.
-// For example, if our original field name was "A.B.C.D",
-// and at this point in iteration we had initialized the field
-// corresponding to "A" and have moved into the struct, so that now
-// v corresponds to the field named "B", then we want to let the
-// PLS handle this field (B)'s subfields ("C", "D"),
-// so we send the property to the PLS's Load, renamed to "C.D".
-//
-// If subfields are present, the field v has been flattened.
-func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
-	vpls, err := plsForLoad(v)
-	if err != nil {
-		return false, err
-	}
-
-	if vpls == nil {
-		return false, nil
-	}
-
-	// If Entity, load properties as well as key.
-	if e, ok := p.Value.(*Entity); ok {
-		err = loadEntity(vpls, e)
-		return true, err
-	}
-
-	// If flattened, we must alter the property's name to reflect
-	// the field v.
-	if len(subfields) > 0 {
-		p.Name = strings.Join(subfields, ".")
-	}
-
-	return true, vpls.Load([]Property{p})
-}
-
-// setVal sets 'v' to the value of the Property 'p'.
-func setVal(v reflect.Value, p Property) string {
-	pValue := p.Value
-	switch v.Kind() {
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		x, ok := pValue.(int64)
-		if !ok && pValue != nil {
-			return typeMismatchReason(p, v)
-		}
-		if v.OverflowInt(x) {
-			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
-		}
-		v.SetInt(x)
-	case reflect.Bool:
-		x, ok := pValue.(bool)
-		if !ok && pValue != nil {
-			return typeMismatchReason(p, v)
-		}
-		v.SetBool(x)
-	case reflect.String:
-		x, ok := pValue.(string)
-		if !ok && pValue != nil {
-			return typeMismatchReason(p, v)
-		}
-		v.SetString(x)
-	case reflect.Float32, reflect.Float64:
-		x, ok := pValue.(float64)
-		if !ok && pValue != nil {
-			return typeMismatchReason(p, v)
-		}
-		if v.OverflowFloat(x) {
-			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
-		}
-		v.SetFloat(x)
-	case reflect.Ptr:
-		// v must be a *Key or a pointer to a struct (an Entity value).
-		if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct {
-			return typeMismatchReason(p, v)
-		}
-
-		if pValue == nil {
-			// If v is populated already, set it to nil.
-			if !v.IsNil() {
-				v.Set(reflect.New(v.Type()).Elem())
-			}
-			return ""
-		}
-
-		switch x := pValue.(type) {
-		case *Key:
-			if _, ok := v.Interface().(*Key); !ok {
-				return typeMismatchReason(p, v)
-			}
-			v.Set(reflect.ValueOf(x))
-		case *Entity:
-			if v.IsNil() {
-				v.Set(reflect.New(v.Type().Elem()))
-			}
-			err := loadEntity(v.Interface(), x)
-			if err != nil {
-				return err.Error()
-			}
-
-		default:
-			return typeMismatchReason(p, v)
-		}
-	case reflect.Struct:
-		switch v.Type() {
-		case typeOfTime:
-			x, ok := pValue.(time.Time)
-			if !ok && pValue != nil {
-				return typeMismatchReason(p, v)
-			}
-			v.Set(reflect.ValueOf(x))
-		case typeOfGeoPoint:
-			x, ok := pValue.(GeoPoint)
-			if !ok && pValue != nil {
-				return typeMismatchReason(p, v)
-			}
-			v.Set(reflect.ValueOf(x))
-		default:
-			ent, ok := pValue.(*Entity)
-			if !ok {
-				return typeMismatchReason(p, v)
-			}
-			err := loadEntity(v.Addr().Interface(), ent)
-			if err != nil {
-				return err.Error()
-			}
-		}
-	case reflect.Slice:
-		x, ok := pValue.([]byte)
-		if !ok && pValue != nil {
-			return typeMismatchReason(p, v)
-		}
-		if v.Type().Elem().Kind() != reflect.Uint8 {
-			return typeMismatchReason(p, v)
-		}
-		v.SetBytes(x)
-	default:
-		return typeMismatchReason(p, v)
-	}
-	return ""
-}
-
-// initField is similar to reflect's Value.FieldByIndex, in that it
-// returns the nested struct field corresponding to index, but it
-// initialises any nil pointers encountered when traversing the structure.
-func initField(val reflect.Value, index []int) reflect.Value {
-	for _, i := range index[:len(index)-1] {
-		val = val.Field(i)
-		if val.Kind() == reflect.Ptr {
-			if val.IsNil() {
-				val.Set(reflect.New(val.Type().Elem()))
-			}
-			val = val.Elem()
-		}
-	}
-	return val.Field(index[len(index)-1])
-}
-
-// loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer.
-func loadEntityProto(dst interface{}, src *pb.Entity) error {
-	ent, err := protoToEntity(src)
-	if err != nil {
-		return err
-	}
-	return loadEntity(dst, ent)
-}
-
-func loadEntity(dst interface{}, ent *Entity) error {
-	if pls, ok := dst.(PropertyLoadSaver); ok {
-		err := pls.Load(ent.Properties)
-		if err != nil {
-			return err
-		}
-		if e, ok := dst.(KeyLoader); ok {
-			err = e.LoadKey(ent.Key)
-		}
-		return err
-	}
-	return loadEntityToStruct(dst, ent)
-}
-
-func loadEntityToStruct(dst interface{}, ent *Entity) error {
-	pls, err := newStructPLS(dst)
-	if err != nil {
-		return err
-	}
-	// Load properties.
-	err = pls.Load(ent.Properties)
-	if err != nil {
-		return err
-	}
-	// Load key.
-	keyField := pls.codec.Match(keyFieldName)
-	if keyField != nil && ent.Key != nil {
-		pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key))
-	}
-
-	return nil
-}
-
-func (s structPLS) Load(props []Property) error {
-	var fieldName, errReason string
-	var l propertyLoader
-
-	prev := make(map[string]struct{})
-	for _, p := range props {
-		if errStr := l.load(s.codec, s.v, p, prev); errStr != "" {
-			// We don't return early, as we try to load as many properties as possible.
-			// It is valid to load an entity into a struct that cannot fully represent it.
-			// That case returns an error, but the caller is free to ignore it.
-			fieldName, errReason = p.Name, errStr
-		}
-	}
-	if errReason != "" {
-		return &ErrFieldMismatch{
-			StructType: s.v.Type(),
-			FieldName:  fieldName,
-			Reason:     errReason,
-		}
-	}
-	return nil
-}
-
-func protoToEntity(src *pb.Entity) (*Entity, error) {
-	props := make([]Property, 0, len(src.Properties))
-	for name, val := range src.Properties {
-		v, err := propToValue(val)
-		if err != nil {
-			return nil, err
-		}
-		props = append(props, Property{
-			Name:    name,
-			Value:   v,
-			NoIndex: val.ExcludeFromIndexes,
-		})
-	}
-	var key *Key
-	if src.Key != nil {
-		// Ignore any error, since nested entity values
-		// are allowed to have an invalid key.
-		key, _ = protoToKey(src.Key)
-	}
-
-	return &Entity{key, props}, nil
-}
-
-// propToValue returns a Go value that represents the PropertyValue. For
-// example, a TimestampValue becomes a time.Time.
-func propToValue(v *pb.Value) (interface{}, error) {
-	switch v := v.ValueType.(type) {
-	case *pb.Value_NullValue:
-		return nil, nil
-	case *pb.Value_BooleanValue:
-		return v.BooleanValue, nil
-	case *pb.Value_IntegerValue:
-		return v.IntegerValue, nil
-	case *pb.Value_DoubleValue:
-		return v.DoubleValue, nil
-	case *pb.Value_TimestampValue:
-		return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
-	case *pb.Value_KeyValue:
-		return protoToKey(v.KeyValue)
-	case *pb.Value_StringValue:
-		return v.StringValue, nil
-	case *pb.Value_BlobValue:
-		return []byte(v.BlobValue), nil
-	case *pb.Value_GeoPointValue:
-		return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
-	case *pb.Value_EntityValue:
-		return protoToEntity(v.EntityValue)
-	case *pb.Value_ArrayValue:
-		arr := make([]interface{}, 0, len(v.ArrayValue.Values))
-		for _, v := range v.ArrayValue.Values {
-			vv, err := propToValue(v)
-			if err != nil {
-				return nil, err
-			}
-			arr = append(arr, vv)
-		}
-		return arr, nil
-	default:
-		return nil, nil
-	}
-}

+ 0 - 342
vendor/cloud.google.com/go/datastore/prop.go

@@ -1,342 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-	"unicode"
-
-	"cloud.google.com/go/internal/fields"
-)
-
-// Entities with more than this many indexed properties will not be saved.
-const maxIndexedProperties = 20000
-
-// []byte fields more than 1 megabyte long will not be loaded or saved.
-const maxBlobLen = 1 << 20
-
-// Property is a name/value pair plus some metadata. A datastore entity's
-// contents are loaded and saved as a sequence of Properties. Each property
-// name must be unique within an entity.
-type Property struct {
-	// Name is the property name.
-	Name string
-	// Value is the property value. The valid types are:
-	//	- int64
-	//	- bool
-	//	- string
-	//	- float64
-	//	- *Key
-	//	- time.Time
-	//	- GeoPoint
-	//	- []byte (up to 1 megabyte in length)
-	//	- *Entity (representing a nested struct)
-	// Value can also be:
-	//	- []interface{} where each element is one of the above types
-	// This set is smaller than the set of valid struct field types that the
-	// datastore can load and save. A Value's type must be explicitly on
-	// the list above; it is not sufficient for the underlying type to be
-	// on that list. For example, a Value of "type myInt64 int64" is
-	// invalid. Smaller-width integers and floats are also invalid. Again,
-	// this is more restrictive than the set of valid struct field types.
-	//
-	// A Value will have an opaque type when loading entities from an index,
-	// such as via a projection query. Load entities into a struct instead
-	// of a PropertyLoadSaver when using a projection query.
-	//
-	// A Value may also be the nil interface value; this is equivalent to
-	// Python's None but not directly representable by a Go struct. Loading
-	// a nil-valued property into a struct will set that field to the zero
-	// value.
-	Value interface{}
-	// NoIndex is whether the datastore cannot index this property.
-	// If NoIndex is set to false, []byte and string values are limited to
-	// 1500 bytes.
-	NoIndex bool
-}
-
-// An Entity is the value type for a nested struct.
-// This type is only used for a Property's Value.
-type Entity struct {
-	Key        *Key
-	Properties []Property
-}
-
-// PropertyLoadSaver can be converted from and to a slice of Properties.
-type PropertyLoadSaver interface {
-	Load([]Property) error
-	Save() ([]Property, error)
-}
-
-// KeyLoader can store a Key.
-type KeyLoader interface {
-	// PropertyLoadSaver is embedded because a KeyLoader
-	// must also always implement PropertyLoadSaver.
-	PropertyLoadSaver
-	LoadKey(k *Key) error
-}
-
-// PropertyList converts a []Property to implement PropertyLoadSaver.
-type PropertyList []Property
-
-var (
-	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
-	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
-)
-
-// Load loads all of the provided properties into l.
-// It does not first reset *l to an empty slice.
-func (l *PropertyList) Load(p []Property) error {
-	*l = append(*l, p...)
-	return nil
-}
-
-// Save saves all of l's properties as a slice of Properties.
-func (l *PropertyList) Save() ([]Property, error) {
-	return *l, nil
-}
-
-// validPropertyName returns whether name consists of one or more valid Go
-// identifiers joined by ".".
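-// For example, "A", "A.B" and "_a.b1" are valid; "", "1A" and "A..B" are not.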
-func validPropertyName(name string) bool {
-	if name == "" {
-		return false
-	}
-	for _, s := range strings.Split(name, ".") {
-		if s == "" {
-			return false
-		}
-		first := true
-		for _, c := range s {
-			if first {
-				first = false
-				if c != '_' && !unicode.IsLetter(c) {
-					return false
-				}
-			} else {
-				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
-					return false
-				}
-			}
-		}
-	}
-	return true
-}
-
-// parseTag interprets datastore struct field tags
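-// For example, the tag `datastore:"name,noindex,omitempty"` yields the
-// property name "name" with the noIndex and omitEmpty options set.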
-func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
-	s := t.Get("datastore")
-	parts := strings.Split(s, ",")
-	if parts[0] == "-" && len(parts) == 1 {
-		return "", false, nil, nil
-	}
-	if parts[0] != "" && !validPropertyName(parts[0]) {
-		err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0])
-		return "", false, nil, err
-	}
-
-	var opts saveOpts
-	if len(parts) > 1 {
-		for _, p := range parts[1:] {
-			switch p {
-			case "flatten":
-				opts.flatten = true
-			case "omitempty":
-				opts.omitEmpty = true
-			case "noindex":
-				opts.noIndex = true
-			default:
-				err = fmt.Errorf("datastore: struct tag has invalid option: %q", p)
-				return "", false, nil, err
-			}
-		}
-		other = opts
-	}
-	return parts[0], true, other, nil
-}
-
-func validateType(t reflect.Type) error {
-	if t.Kind() != reflect.Struct {
-		return fmt.Errorf("datastore: validate called with non-struct type %s", t)
-	}
-
-	return validateChildType(t, "", false, false, map[reflect.Type]bool{})
-}
-
-// validateChildType is a recursion helper func for validateType
-func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error {
-	if prevTypes[t] {
-		return nil
-	}
-	prevTypes[t] = true
-
-	switch t.Kind() {
-	case reflect.Slice:
-		if flatten && prevSlice {
-			return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName)
-		}
-		return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes)
-	case reflect.Struct:
-		if t == typeOfTime || t == typeOfGeoPoint {
-			return nil
-		}
-
-		for i := 0; i < t.NumField(); i++ {
-			f := t.Field(i)
-
-			// If a named field is unexported, ignore it. An anonymous
-			// unexported field is processed, because it may contain
-			// exported fields, which are visible.
-			exported := (f.PkgPath == "")
-			if !exported && !f.Anonymous {
-				continue
-			}
-
-			_, keep, other, err := parseTag(f.Tag)
-			// Handle error from parseTag now instead of later (in cache.Fields call).
-			if err != nil {
-				return err
-			}
-			if !keep {
-				continue
-			}
-			if other != nil {
-				opts := other.(saveOpts)
-				flatten = flatten || opts.flatten
-			}
-			if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil {
-				return err
-			}
-		}
-	case reflect.Ptr:
-		if t == typeOfKeyPtr {
-			return nil
-		}
-		return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes)
-	}
-	return nil
-}
-
-// isLeafType determines whether or not a type is a 'leaf type'
-// and should not be recursed into, but considered one field.
-func isLeafType(t reflect.Type) bool {
-	return t == typeOfTime || t == typeOfGeoPoint
-}
-
-// structCache collects the structs whose fields have already been calculated.
-var structCache = fields.NewCache(parseTag, validateType, isLeafType)
-
-// structPLS adapts a struct to be a PropertyLoadSaver.
-type structPLS struct {
-	v     reflect.Value
-	codec fields.List
-}
-
-// newStructPLS returns a structPLS, which implements the
-// PropertyLoadSaver interface, for the struct pointer p.
-func newStructPLS(p interface{}) (*structPLS, error) {
-	v := reflect.ValueOf(p)
-	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
-		return nil, ErrInvalidEntityType
-	}
-	v = v.Elem()
-	f, err := structCache.Fields(v.Type())
-	if err != nil {
-		return nil, err
-	}
-	return &structPLS{v, f}, nil
-}
-
-// LoadStruct loads the properties from p to dst.
-// dst must be a struct pointer.
-//
-// The values of dst's unmatched struct fields are not modified,
-// and matching slice-typed fields are not reset before appending to
-// them. In particular, it is recommended to pass a pointer to a zero
-// valued struct on each LoadStruct call.
-func LoadStruct(dst interface{}, p []Property) error {
-	x, err := newStructPLS(dst)
-	if err != nil {
-		return err
-	}
-	return x.Load(p)
-}
-
-// SaveStruct returns the properties from src as a slice of Properties.
-// src must be a struct pointer.
-func SaveStruct(src interface{}) ([]Property, error) {
-	x, err := newStructPLS(src)
-	if err != nil {
-		return nil, err
-	}
-	return x.Save()
-}
-
-// plsForLoad tries to convert v to a PropertyLoadSaver.
-// If successful, plsForLoad returns a settable v as a PropertyLoadSaver.
-//
-// plsForLoad is intended to be used with nested struct fields which
-// may implement PropertyLoadSaver.
-//
-// v must be settable.
-func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) {
-	var nilPtr bool
-	if v.Kind() == reflect.Ptr && v.IsNil() {
-		nilPtr = true
-		v.Set(reflect.New(v.Type().Elem()))
-	}
-
-	vpls, err := pls(v)
-	if nilPtr && (vpls == nil || err != nil) {
-		// unset v
-		v.Set(reflect.Zero(v.Type()))
-	}
-
-	return vpls, err
-}
-
-// plsForSave tries to convert v to a PropertyLoadSaver.
-// If successful, plsForSave returns v as a PropertyLoadSaver.
-//
-// plsForSave is intended to be used with nested struct fields which
-// may implement PropertyLoadSaver.
-//
-// v must be settable.
-func plsForSave(v reflect.Value) (PropertyLoadSaver, error) {
-	switch v.Kind() {
-	case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func:
-		// If v is nil, return early. v contains no data to save.
-		if v.IsNil() {
-			return nil, nil
-		}
-	}
-
-	return pls(v)
-}
-
-func pls(v reflect.Value) (PropertyLoadSaver, error) {
-	if v.Kind() != reflect.Ptr {
-		if _, ok := v.Interface().(PropertyLoadSaver); ok {
-			return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
-		}
-
-		v = v.Addr()
-	}
-
-	vpls, _ := v.Interface().(PropertyLoadSaver)
-	return vpls, nil
-}

+ 0 - 773
vendor/cloud.google.com/go/datastore/query.go

@@ -1,773 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"math"
-	"reflect"
-	"strconv"
-	"strings"
-
-	wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-)
-
-type operator int
-
-const (
-	lessThan operator = iota + 1
-	lessEq
-	equal
-	greaterEq
-	greaterThan
-
-	keyFieldName = "__key__"
-)
-
-var operatorToProto = map[operator]pb.PropertyFilter_Operator{
-	lessThan:    pb.PropertyFilter_LESS_THAN,
-	lessEq:      pb.PropertyFilter_LESS_THAN_OR_EQUAL,
-	equal:       pb.PropertyFilter_EQUAL,
-	greaterEq:   pb.PropertyFilter_GREATER_THAN_OR_EQUAL,
-	greaterThan: pb.PropertyFilter_GREATER_THAN,
-}
-
-// filter is a conditional filter on query results.
-type filter struct {
-	FieldName string
-	Op        operator
-	Value     interface{}
-}
-
-type sortDirection bool
-
-const (
-	ascending  sortDirection = false
-	descending sortDirection = true
-)
-
-var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{
-	ascending:  pb.PropertyOrder_ASCENDING,
-	descending: pb.PropertyOrder_DESCENDING,
-}
-
-// order is a sort order on query results.
-type order struct {
-	FieldName string
-	Direction sortDirection
-}
-
-// NewQuery creates a new Query for a specific entity kind.
-//
-// An empty kind means to return all entities, including entities created and
-// managed by other App Engine features, and is called a kindless query.
-// Kindless queries cannot include filters or sort orders on property values.
-func NewQuery(kind string) *Query {
-	return &Query{
-		kind:  kind,
-		limit: -1,
-	}
-}
-
-// Query represents a datastore query.
-type Query struct {
-	kind       string
-	ancestor   *Key
-	filter     []filter
-	order      []order
-	projection []string
-
-	distinct   bool
-	distinctOn []string
-	keysOnly   bool
-	eventual   bool
-	limit      int32
-	offset     int32
-	start      []byte
-	end        []byte
-
-	namespace string
-
-	trans *Transaction
-
-	err error
-}
-
-func (q *Query) clone() *Query {
-	x := *q
-	// Copy the contents of the slice-typed fields to a new backing store.
-	if len(q.filter) > 0 {
-		x.filter = make([]filter, len(q.filter))
-		copy(x.filter, q.filter)
-	}
-	if len(q.order) > 0 {
-		x.order = make([]order, len(q.order))
-		copy(x.order, q.order)
-	}
-	return &x
-}
-
-// Ancestor returns a derivative query with an ancestor filter.
-// The ancestor should not be nil.
-func (q *Query) Ancestor(ancestor *Key) *Query {
-	q = q.clone()
-	if ancestor == nil {
-		q.err = errors.New("datastore: nil query ancestor")
-		return q
-	}
-	q.ancestor = ancestor
-	return q
-}
-
-// EventualConsistency returns a derivative query that returns eventually
-// consistent results.
-// It only has an effect on ancestor queries.
-func (q *Query) EventualConsistency() *Query {
-	q = q.clone()
-	q.eventual = true
-	return q
-}
-
-// Namespace returns a derivative query that is associated with the given
-// namespace.
-//
-// A namespace may be used to partition data for multi-tenant applications.
-// For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy.
-func (q *Query) Namespace(ns string) *Query {
-	q = q.clone()
-	q.namespace = ns
-	return q
-}
-
-// Transaction returns a derivative query that is associated with the given
-// transaction.
-//
-// All reads performed as part of the transaction will come from a single
-// consistent snapshot. Furthermore, if the transaction is set to a
-// serializable isolation level, another transaction cannot concurrently modify
-// the data that is read or modified by this transaction.
-func (q *Query) Transaction(t *Transaction) *Query {
-	q = q.clone()
-	q.trans = t
-	return q
-}
-
-// Filter returns a derivative query with a field-based filter.
-// The filterStr argument must be a field name followed by an optional space,
-// followed by an operator, one of ">", "<", ">=", "<=", or "=".
-// Fields are compared against the provided value using the operator.
-// Multiple filters are AND'ed together.
-// Field names which contain spaces, quote marks, or operator characters
-// should be passed as quoted Go string literals as returned by strconv.Quote
-// or the fmt package's %q verb.
-func (q *Query) Filter(filterStr string, value interface{}) *Query {
-	q = q.clone()
-	filterStr = strings.TrimSpace(filterStr)
-	if filterStr == "" {
-		q.err = fmt.Errorf("datastore: invalid filter %q", filterStr)
-		return q
-	}
-	f := filter{
-		FieldName: strings.TrimRight(filterStr, " ><=!"),
-		Value:     value,
-	}
-	switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
-	case "<=":
-		f.Op = lessEq
-	case ">=":
-		f.Op = greaterEq
-	case "<":
-		f.Op = lessThan
-	case ">":
-		f.Op = greaterThan
-	case "=":
-		f.Op = equal
-	default:
-		q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
-		return q
-	}
-	var err error
-	f.FieldName, err = unquote(f.FieldName)
-	if err != nil {
-		q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName)
-		return q
-	}
-	q.filter = append(q.filter, f)
-	return q
-}
-
-// Order returns a derivative query with a field-based sort order. Orders are
-// applied in the order they are added. The default order is ascending; to sort
-// in descending order prefix the fieldName with a minus sign (-).
-// Field names which contain spaces, quote marks, or the minus sign
-// should be passed as quoted Go string literals as returned by strconv.Quote
-// or the fmt package's %q verb.
-func (q *Query) Order(fieldName string) *Query {
-	q = q.clone()
-	fieldName, dir := strings.TrimSpace(fieldName), ascending
-	if strings.HasPrefix(fieldName, "-") {
-		fieldName, dir = strings.TrimSpace(fieldName[1:]), descending
-	} else if strings.HasPrefix(fieldName, "+") {
-		q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
-		return q
-	}
-	fieldName, err := unquote(fieldName)
-	if err != nil {
-		q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName)
-		return q
-	}
-	if fieldName == "" {
-		q.err = errors.New("datastore: empty order")
-		return q
-	}
-	q.order = append(q.order, order{
-		Direction: dir,
-		FieldName: fieldName,
-	})
-	return q
-}
-
-// unquote optionally interprets s as a double-quoted or backquoted Go
-// string literal if it begins with the relevant character.
-func unquote(s string) (string, error) {
-	if s == "" || (s[0] != '`' && s[0] != '"') {
-		return s, nil
-	}
-	return strconv.Unquote(s)
-}
-
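-// exampleFilterOrder is an editor's sketch, not part of the original file:
-// it chains Filter and Order, including a quoted field name built with
-// strconv.Quote as the docs above suggest. The "Task" kind and its fields
-// are hypothetical; ctx and client are assumed to exist.
-func exampleFilterOrder(ctx context.Context, client *Client) *Iterator {
-	q := NewQuery("Task").
-		Filter("Done =", false).
-		Filter(strconv.Quote("Due Date")+" <", 42).
-		Order("-Created").
-		Limit(10)
-	return client.Run(ctx, q)
-}
-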
-// Project returns a derivative query that yields only the given fields. It
-// cannot be used with KeysOnly.
-func (q *Query) Project(fieldNames ...string) *Query {
-	q = q.clone()
-	q.projection = append([]string(nil), fieldNames...)
-	return q
-}
-
-// Distinct returns a derivative query that yields de-duplicated entities with
-// respect to the set of projected fields. It is only used for projection
-// queries. Distinct cannot be used with DistinctOn.
-func (q *Query) Distinct() *Query {
-	q = q.clone()
-	q.distinct = true
-	return q
-}
-
-// DistinctOn returns a derivative query that yields de-duplicated entities with
-// respect to the set of the specified fields. It is only used for projection
-// queries. The field list should be a subset of the projected field list.
-// DistinctOn cannot be used with Distinct.
-func (q *Query) DistinctOn(fieldNames ...string) *Query {
-	q = q.clone()
-	q.distinctOn = fieldNames
-	return q
-}
-
-// KeysOnly returns a derivative query that yields only keys, not keys and
-// entities. It cannot be used with projection queries.
-func (q *Query) KeysOnly() *Query {
-	q = q.clone()
-	q.keysOnly = true
-	return q
-}
-
-// Limit returns a derivative query that has a limit on the number of results
-// returned. A negative value means unlimited.
-func (q *Query) Limit(limit int) *Query {
-	q = q.clone()
-	if limit < math.MinInt32 || limit > math.MaxInt32 {
-		q.err = errors.New("datastore: query limit overflow")
-		return q
-	}
-	q.limit = int32(limit)
-	return q
-}
-
-// Offset returns a derivative query that has an offset of how many keys to
-// skip over before returning results. A negative value is invalid.
-func (q *Query) Offset(offset int) *Query {
-	q = q.clone()
-	if offset < 0 {
-		q.err = errors.New("datastore: negative query offset")
-		return q
-	}
-	if offset > math.MaxInt32 {
-		q.err = errors.New("datastore: query offset overflow")
-		return q
-	}
-	q.offset = int32(offset)
-	return q
-}
-
-// Start returns a derivative query with the given start point.
-func (q *Query) Start(c Cursor) *Query {
-	q = q.clone()
-	q.start = c.cc
-	return q
-}
-
-// End returns a derivative query with the given end point.
-func (q *Query) End(c Cursor) *Query {
-	q = q.clone()
-	q.end = c.cc
-	return q
-}
-
-// toProto converts the query to a protocol buffer.
-func (q *Query) toProto(req *pb.RunQueryRequest) error {
-	if len(q.projection) != 0 && q.keysOnly {
-		return errors.New("datastore: query cannot both project and be keys-only")
-	}
-	if len(q.distinctOn) != 0 && q.distinct {
-		return errors.New("datastore: query cannot be both distinct and distinct-on")
-	}
-	dst := &pb.Query{}
-	if q.kind != "" {
-		dst.Kind = []*pb.KindExpression{{Name: q.kind}}
-	}
-	if q.projection != nil {
-		for _, propertyName := range q.projection {
-			dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}})
-		}
-
-		for _, propertyName := range q.distinctOn {
-			dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
-		}
-
-		if q.distinct {
-			for _, propertyName := range q.projection {
-				dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
-			}
-		}
-	}
-	if q.keysOnly {
-		dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}}
-	}
-
-	var filters []*pb.Filter
-	for _, qf := range q.filter {
-		if qf.FieldName == "" {
-			return errors.New("datastore: empty query filter field name")
-		}
-		v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false)
-		if err != nil {
-			return fmt.Errorf("datastore: bad query filter value type: %v", err)
-		}
-		op, ok := operatorToProto[qf.Op]
-		if !ok {
-			return errors.New("datastore: unknown query filter operator")
-		}
-		xf := &pb.PropertyFilter{
-			Op:       op,
-			Property: &pb.PropertyReference{Name: qf.FieldName},
-			Value:    v,
-		}
-		filters = append(filters, &pb.Filter{
-			FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf},
-		})
-	}
-
-	if q.ancestor != nil {
-		filters = append(filters, &pb.Filter{
-			FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{
-				Property: &pb.PropertyReference{Name: keyFieldName},
-				Op:       pb.PropertyFilter_HAS_ANCESTOR,
-				Value:    &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}},
-			}}})
-	}
-
-	if len(filters) == 1 {
-		dst.Filter = filters[0]
-	} else if len(filters) > 1 {
-		dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{
-			Op:      pb.CompositeFilter_AND,
-			Filters: filters,
-		}}}
-	}
-
-	for _, qo := range q.order {
-		if qo.FieldName == "" {
-			return errors.New("datastore: empty query order field name")
-		}
-		xo := &pb.PropertyOrder{
-			Property:  &pb.PropertyReference{Name: qo.FieldName},
-			Direction: sortDirectionToProto[qo.Direction],
-		}
-		dst.Order = append(dst.Order, xo)
-	}
-	if q.limit >= 0 {
-		dst.Limit = &wrapperspb.Int32Value{Value: q.limit}
-	}
-	dst.Offset = q.offset
-	dst.StartCursor = q.start
-	dst.EndCursor = q.end
-
-	if t := q.trans; t != nil {
-		if t.id == nil {
-			return errExpiredTransaction
-		}
-		if q.eventual {
-			return errors.New("datastore: cannot use EventualConsistency query in a transaction")
-		}
-		req.ReadOptions = &pb.ReadOptions{
-			ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
-		}
-	}
-
-	if q.eventual {
-		req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}}
-	}
-
-	req.QueryType = &pb.RunQueryRequest_Query{Query: dst}
-	return nil
-}
-
-// Count returns the number of results for the given query.
-//
-// The running time and number of API calls made by Count scale linearly
-// with the sum of the query's offset and limit. Unless the result count is
-// expected to be small, it is best to specify a limit; otherwise Count will
-// continue until it finishes counting or the provided context expires.
-func (c *Client) Count(ctx context.Context, q *Query) (int, error) {
-	// Check that the query is well-formed.
-	if q.err != nil {
-		return 0, q.err
-	}
-
-	// Create a copy of the query, with keysOnly true (if we're not a projection,
-	// since the two are incompatible).
-	newQ := q.clone()
-	newQ.keysOnly = len(newQ.projection) == 0
-
-	// Create an iterator and use it to walk through the batches of results
-	// directly.
-	it := c.Run(ctx, newQ)
-	n := 0
-	for {
-		err := it.nextBatch()
-		if err == iterator.Done {
-			return n, nil
-		}
-		if err != nil {
-			return 0, err
-		}
-		n += len(it.results)
-	}
-}
-
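-// exampleCount is an editor's sketch, not part of the original file: because
-// Count's cost grows with offset+limit, it caps the count with Limit. The
-// "Task" kind is hypothetical.
-func exampleCount(ctx context.Context, client *Client) (int, error) {
-	return client.Count(ctx, NewQuery("Task").Limit(1000))
-}
-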
-// GetAll runs the provided query in the given context and returns all keys
-// that match that query, as well as appending the values to dst.
-//
-// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
-// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
-//
-// As a special case, *PropertyList is an invalid type for dst, even though a
-// PropertyList is a slice of structs. It is treated as invalid to avoid being
-// mistakenly passed when *[]PropertyList was intended.
-//
-// The keys returned by GetAll will be in a 1-1 correspondence with the entities
-// added to dst.
-//
-// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
-//
-// The running time and number of API calls made by GetAll scale linearly
-// with the sum of the query's offset and limit. Unless the result count is
-// expected to be small, it is best to specify a limit; otherwise GetAll will
-// continue until it finishes collecting results or the provided context
-// expires.
-func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) {
-	var (
-		dv               reflect.Value
-		mat              multiArgType
-		elemType         reflect.Type
-		errFieldMismatch error
-	)
-	if !q.keysOnly {
-		dv = reflect.ValueOf(dst)
-		if dv.Kind() != reflect.Ptr || dv.IsNil() {
-			return nil, ErrInvalidEntityType
-		}
-		dv = dv.Elem()
-		mat, elemType = checkMultiArg(dv)
-		if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
-			return nil, ErrInvalidEntityType
-		}
-	}
-
-	var keys []*Key
-	for t := c.Run(ctx, q); ; {
-		k, e, err := t.next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			return keys, err
-		}
-		if !q.keysOnly {
-			ev := reflect.New(elemType)
-			if elemType.Kind() == reflect.Map {
-				// This is a special case. The zero values of a map type are
-				// not immediately useful; they have to be make'd.
-				//
-				// Funcs and channels are similar, in that a zero value is not useful,
-				// but even a freshly make'd channel isn't useful: there's no fixed
-				// channel buffer size that is always going to be large enough, and
-				// there's no goroutine to drain the other end. Theoretically, these
-				// types could be supported, for example by sniffing for a constructor
-				// method or requiring prior registration, but for now it's not a
-				// frequent enough concern to be worth it. Programmers can work around
-				// it by explicitly using Iterator.Next instead of the Query.GetAll
-				// convenience method.
-				x := reflect.MakeMap(elemType)
-				ev.Elem().Set(x)
-			}
-			if err = loadEntityProto(ev.Interface(), e); err != nil {
-				if _, ok := err.(*ErrFieldMismatch); ok {
-					// We continue loading entities even in the face of field mismatch errors.
-					// If we encounter any other error, that other error is returned. Otherwise,
-					// an ErrFieldMismatch is returned.
-					errFieldMismatch = err
-				} else {
-					return keys, err
-				}
-			}
-			if mat != multiArgTypeStructPtr {
-				ev = ev.Elem()
-			}
-			dv.Set(reflect.Append(dv, ev))
-		}
-		keys = append(keys, k)
-	}
-	return keys, errFieldMismatch
-}
-
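-// exampleGetAll is an editor's sketch, not part of the original file: dst is
-// a pointer to a slice, and the returned keys correspond 1-1 with its
-// elements. The Task struct is hypothetical.
-func exampleGetAll(ctx context.Context, client *Client) ([]*Key, error) {
-	type Task struct {
-		Done bool
-	}
-	var tasks []Task
-	// keys[i] is the key of tasks[i].
-	return client.GetAll(ctx, NewQuery("Task").Limit(100), &tasks)
-}
-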
-// Run runs the given query in the given context.
-func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
-	if q.err != nil {
-		return &Iterator{err: q.err}
-	}
-	t := &Iterator{
-		ctx:          ctx,
-		client:       c,
-		limit:        q.limit,
-		offset:       q.offset,
-		keysOnly:     q.keysOnly,
-		pageCursor:   q.start,
-		entityCursor: q.start,
-		req: &pb.RunQueryRequest{
-			ProjectId: c.dataset,
-		},
-	}
-	if q.namespace != "" {
-		t.req.PartitionId = &pb.PartitionId{
-			NamespaceId: q.namespace,
-		}
-	}
-
-	if err := q.toProto(t.req); err != nil {
-		t.err = err
-	}
-	return t
-}
-
-// Iterator is the result of running a query.
-type Iterator struct {
-	ctx    context.Context
-	client *Client
-	err    error
-
-	// results is the list of EntityResults still to be iterated over from the
-	// most recent API call. It will be nil if no requests have yet been issued.
-	results []*pb.EntityResult
-	// req is the request to send. It may be modified and used multiple times.
-	req *pb.RunQueryRequest
-
-	// limit is the limit on the number of results this iterator should return.
-	// The zero value is used to prevent further fetches from the server.
-	// A negative value means unlimited.
-	limit int32
-	// offset is the number of results that still need to be skipped.
-	offset int32
-	// keysOnly records whether the query was keys-only (skip entity loading).
-	keysOnly bool
-
-	// pageCursor is the compiled cursor for the next batch/page of result.
-	// TODO(djd): Can we delete this in favour of paging with the last
-	// entityCursor from each batch?
-	pageCursor []byte
-	// entityCursor is the compiled cursor of the next result.
-	entityCursor []byte
-}
-
-// Next returns the key of the next result. When there are no more results,
-// iterator.Done is returned as the error.
-//
-// If the query is not keys only and dst is non-nil, it also loads the entity
-// stored for that key into the struct pointer or PropertyLoadSaver dst, with
-// the same semantics and possible errors as for the Get function.
-func (t *Iterator) Next(dst interface{}) (*Key, error) {
-	k, e, err := t.next()
-	if err != nil {
-		return nil, err
-	}
-	if dst != nil && !t.keysOnly {
-		err = loadEntityProto(dst, e)
-	}
-	return k, err
-}
-
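-// exampleNextLoop is an editor's sketch, not part of the original file: the
-// canonical Run/Next loop, terminating on iterator.Done. Task is hypothetical.
-func exampleNextLoop(ctx context.Context, client *Client) error {
-	type Task struct {
-		Done bool
-	}
-	it := client.Run(ctx, NewQuery("Task"))
-	for {
-		var task Task
-		_, err := it.Next(&task)
-		if err == iterator.Done {
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-	}
-}
-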
-func (t *Iterator) next() (*Key, *pb.Entity, error) {
-	// Fetch additional batches while there are no more results.
-	for t.err == nil && len(t.results) == 0 {
-		t.err = t.nextBatch()
-	}
-	if t.err != nil {
-		return nil, nil, t.err
-	}
-
-	// Extract the next result, update cursors, and parse the entity's key.
-	e := t.results[0]
-	t.results = t.results[1:]
-	t.entityCursor = e.Cursor
-	if len(t.results) == 0 {
-		t.entityCursor = t.pageCursor // At the end of the batch.
-	}
-	if e.Entity.Key == nil {
-		return nil, nil, errors.New("datastore: internal error: server did not return a key")
-	}
-	k, err := protoToKey(e.Entity.Key)
-	if err != nil || k.Incomplete() {
-		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
-	}
-
-	return k, e.Entity, nil
-}
-
-// nextBatch makes a single call to the server for a batch of results.
-func (t *Iterator) nextBatch() error {
-	if t.limit == 0 {
-		return iterator.Done // Short-circuits the zero-item response.
-	}
-
-	// Adjust the query with the latest start cursor, limit and offset.
-	q := t.req.GetQuery()
-	q.StartCursor = t.pageCursor
-	q.Offset = t.offset
-	if t.limit >= 0 {
-		q.Limit = &wrapperspb.Int32Value{Value: t.limit}
-	} else {
-		q.Limit = nil
-	}
-
-	// Run the query.
-	resp, err := t.client.client.RunQuery(t.ctx, t.req)
-	if err != nil {
-		return err
-	}
-
-	// Adjust any offset from skipped results.
-	skip := resp.Batch.SkippedResults
-	if skip < 0 {
-		return errors.New("datastore: internal error: negative number of skipped_results")
-	}
-	t.offset -= skip
-	if t.offset < 0 {
-		return errors.New("datastore: internal error: query skipped too many results")
-	}
-	if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
-		return errors.New("datastore: internal error: query returned results before requested offset")
-	}
-
-	// Adjust the limit.
-	if t.limit >= 0 {
-		t.limit -= int32(len(resp.Batch.EntityResults))
-		if t.limit < 0 {
-			return errors.New("datastore: internal error: query returned more results than the limit")
-		}
-	}
-
-	// If there are no more results available, set limit to zero to prevent
-	// further fetches. Otherwise, check that there is a next page cursor available.
-	if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
-		t.limit = 0
-	} else if resp.Batch.EndCursor == nil {
-		return errors.New("datastore: internal error: server did not return a cursor")
-	}
-
-	// Update cursors.
-	// If any results were skipped, use the SkippedCursor as the next entity cursor.
-	if skip > 0 {
-		t.entityCursor = resp.Batch.SkippedCursor
-	} else {
-		t.entityCursor = q.StartCursor
-	}
-	t.pageCursor = resp.Batch.EndCursor
-
-	t.results = resp.Batch.EntityResults
-	return nil
-}
-
-// Cursor returns a cursor for the iterator's current location.
-func (t *Iterator) Cursor() (Cursor, error) {
-	// If there is still an offset, we need to skip those results first.
-	for t.err == nil && t.offset > 0 {
-		t.err = t.nextBatch()
-	}
-
-	if t.err != nil && t.err != iterator.Done {
-		return Cursor{}, t.err
-	}
-
-	return Cursor{t.entityCursor}, nil
-}
-
-// Cursor is an iterator's position. It can be converted to and from an opaque
-// string. A cursor can be used from different HTTP requests, but only with a
-// query with the same kind, ancestor, filter and order constraints.
-//
-// The zero Cursor can be used to indicate that there is no start and/or end
-// constraint for a query.
-type Cursor struct {
-	cc []byte
-}
-
-// String returns a base-64 string representation of a cursor.
-func (c Cursor) String() string {
-	if c.cc == nil {
-		return ""
-	}
-
-	return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
-}
-
-// DecodeCursor decodes a cursor from its base-64 string representation.
-func DecodeCursor(s string) (Cursor, error) {
-	if s == "" {
-		return Cursor{}, nil
-	}
-	if n := len(s) % 4; n != 0 {
-		s += strings.Repeat("=", 4-n)
-	}
-	b, err := base64.URLEncoding.DecodeString(s)
-	if err != nil {
-		return Cursor{}, err
-	}
-	return Cursor{b}, nil
-}
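-
-// exampleCursorRoundTrip is an editor's sketch, not part of the original
-// file: it serializes the iterator's position and resumes the same query
-// from it later. The "Task" kind is hypothetical.
-func exampleCursorRoundTrip(ctx context.Context, client *Client, it *Iterator) (*Iterator, error) {
-	cur, err := it.Cursor()
-	if err != nil {
-		return nil, err
-	}
-	s := cur.String() // opaque; safe to pass between HTTP requests
-	cur2, err := DecodeCursor(s)
-	if err != nil {
-		return nil, err
-	}
-	return client.Run(ctx, NewQuery("Task").Start(cur2)), nil
-}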

+ 0 - 425
vendor/cloud.google.com/go/datastore/save.go

@@ -1,425 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"time"
-	"unicode/utf8"
-
-	timepb "github.com/golang/protobuf/ptypes/timestamp"
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-	llpb "google.golang.org/genproto/googleapis/type/latlng"
-)
-
-type saveOpts struct {
-	noIndex   bool
-	flatten   bool
-	omitEmpty bool
-}
-
-// saveEntity converts a PropertyLoadSaver or struct pointer into a pb.Entity.
-func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
-	var err error
-	var props []Property
-	if e, ok := src.(PropertyLoadSaver); ok {
-		props, err = e.Save()
-	} else {
-		props, err = SaveStruct(src)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return propertiesToProto(key, props)
-}
-
-// TODO(djd): Convert this and below to return ([]Property, error).
-func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
-	p := Property{
-		Name:    name,
-		NoIndex: opts.noIndex,
-	}
-
-	if opts.omitEmpty && isEmptyValue(v) {
-		return nil
-	}
-
-	// First check if field type implements PLS. If so, use PLS to
-	// save.
-	ok, err := plsFieldSave(props, p, name, opts, v)
-	if err != nil {
-		return err
-	}
-	if ok {
-		return nil
-	}
-
-	switch x := v.Interface().(type) {
-	case *Key, time.Time, GeoPoint:
-		p.Value = x
-	default:
-		switch v.Kind() {
-		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-			p.Value = v.Int()
-		case reflect.Bool:
-			p.Value = v.Bool()
-		case reflect.String:
-			p.Value = v.String()
-		case reflect.Float32, reflect.Float64:
-			p.Value = v.Float()
-		case reflect.Slice:
-			if v.Type().Elem().Kind() == reflect.Uint8 {
-				p.Value = v.Bytes()
-			} else {
-				return saveSliceProperty(props, name, opts, v)
-			}
-		case reflect.Ptr:
-			if v.Type().Elem().Kind() != reflect.Struct {
-				return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
-			}
-			if v.IsNil() {
-				return nil
-			}
-			v = v.Elem()
-			fallthrough
-		case reflect.Struct:
-			if !v.CanAddr() {
-				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
-			}
-			vi := v.Addr().Interface()
-
-			sub, err := newStructPLS(vi)
-			if err != nil {
-				return fmt.Errorf("datastore: unsupported struct field: %v", err)
-			}
-
-			if opts.flatten {
-				return sub.save(props, opts, name+".")
-			}
-
-			var subProps []Property
-			err = sub.save(&subProps, opts, "")
-			if err != nil {
-				return err
-			}
-			subKey, err := sub.key(v)
-			if err != nil {
-				return err
-			}
-
-			p.Value = &Entity{
-				Key:        subKey,
-				Properties: subProps,
-			}
-		}
-	}
-	if p.Value == nil {
-		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
-	}
-	*props = append(*props, p)
-	return nil
-}
-
-// plsFieldSave first tries to convert v's value to a PLS, then v's addressed
-// value to a PLS. If neither succeeds, plsFieldSave returns false for its
-// first return value.
-// If v is successfully converted to a PLS, plsFieldSave will then add the
-// Value to property p by way of the PLS's Save method, and append it to props.
-//
-// If the flatten option is present in opts, name must be prepended to each property's
-// name before it is appended to props. E.g. if name were "A" and a subproperty's name
-// were "B", the resultant name of the property to be appended to props would be "A.B".
-func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) {
-	vpls, err := plsForSave(v)
-	if err != nil {
-		return false, err
-	}
-
-	if vpls == nil {
-		return false, nil
-	}
-
-	subProps, err := vpls.Save()
-	if err != nil {
-		return true, err
-	}
-
-	if opts.flatten {
-		for _, subp := range subProps {
-			subp.Name = name + "." + subp.Name
-			*props = append(*props, subp)
-		}
-		return true, nil
-	}
-
-	p.Value = &Entity{Properties: subProps}
-	*props = append(*props, p)
-
-	return true, nil
-}
-
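-// exampleFlatten is an editor's sketch, not part of the original file: with
-// the package's ",flatten" struct tag (parsed elsewhere, not shown here),
-// Inner's field is saved as the single property "Inner.X".
-func exampleFlatten() ([]Property, error) {
-	type Inner struct {
-		X int64
-	}
-	type Outer struct {
-		Inner Inner `datastore:",flatten"`
-	}
-	return SaveStruct(&Outer{Inner: Inner{X: 7}})
-}
-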
-// key extracts the *Key struct field from struct v based on the structCodec of s.
-func (s structPLS) key(v reflect.Value) (*Key, error) {
-	if v.Kind() != reflect.Struct {
-		return nil, errors.New("datastore: cannot save key of non-struct type")
-	}
-
-	keyField := s.codec.Match(keyFieldName)
-
-	if keyField == nil {
-		return nil, nil
-	}
-
-	f := v.FieldByIndex(keyField.Index)
-	k, ok := f.Interface().(*Key)
-	if !ok {
-		return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface())
-	}
-
-	return k, nil
-}
-
-func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
-	// Easy case: if the slice is empty, we're done.
-	if v.Len() == 0 {
-		return nil
-	}
-	// Work out the properties generated by the first element in the slice. This will
-	// usually be a single property, but will be more if this is a slice of structs.
-	var headProps []Property
-	if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil {
-		return err
-	}
-
-	// Convert the first element's properties into slice properties, and
-	// keep track of the values in a map.
-	values := make(map[string][]interface{}, len(headProps))
-	for _, p := range headProps {
-		values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value)
-	}
-
-	// Collect the properties generated by the subsequent elements.
-	for i := 1; i < v.Len(); i++ {
-		elemProps := make([]Property, 0, len(headProps))
-		if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil {
-			return err
-		}
-		for _, p := range elemProps {
-			v, ok := values[p.Name]
-			if !ok {
-				return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i)
-			}
-			values[p.Name] = append(v, p.Value)
-		}
-	}
-
-	// Convert to the final properties.
-	for _, p := range headProps {
-		p.Value = values[p.Name]
-		*props = append(*props, p)
-	}
-	return nil
-}
-
-func (s structPLS) Save() ([]Property, error) {
-	var props []Property
-	if err := s.save(&props, saveOpts{}, ""); err != nil {
-		return nil, err
-	}
-	return props, nil
-}
-
-func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error {
-	for _, f := range s.codec {
-		name := prefix + f.Name
-		v := getField(s.v, f.Index)
-		if !v.IsValid() || !v.CanSet() {
-			continue
-		}
-
-		var tagOpts saveOpts
-		if f.ParsedTag != nil {
-			tagOpts = f.ParsedTag.(saveOpts)
-		}
-
-		var opts1 saveOpts
-		opts1.noIndex = opts.noIndex || tagOpts.noIndex
-		opts1.flatten = opts.flatten || tagOpts.flatten
-		opts1.omitEmpty = tagOpts.omitEmpty // don't propagate
-		if err := saveStructProperty(props, name, opts1, v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// getField returns the field from v at the given index path.
-// If it encounters a nil-valued field in the path, getField
-// stops and returns a zero-valued reflect.Value, preventing the
-// panic that would have been caused by reflect's FieldByIndex.
-func getField(v reflect.Value, index []int) reflect.Value {
-	var zero reflect.Value
-	if v.Type().Kind() != reflect.Struct {
-		return zero
-	}
-
-	for _, i := range index {
-		if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct {
-			if v.IsNil() {
-				return zero
-			}
-			v = v.Elem()
-		}
-		v = v.Field(i)
-	}
-	return v
-}
-
-func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
-	e := &pb.Entity{
-		Key:        keyToProto(key),
-		Properties: map[string]*pb.Value{},
-	}
-	indexedProps := 0
-	for _, p := range props {
-		// Do not send a Key value as a field to datastore.
-		if p.Name == keyFieldName {
-			continue
-		}
-
-		val, err := interfaceToProto(p.Value, p.NoIndex)
-		if err != nil {
-			return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name)
-		}
-		if !p.NoIndex {
-			rVal := reflect.ValueOf(p.Value)
-			if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {
-				indexedProps += rVal.Len()
-			} else {
-				indexedProps++
-			}
-		}
-		if indexedProps > maxIndexedProperties {
-			return nil, errors.New("datastore: too many indexed properties")
-		}
-
-		if _, ok := e.Properties[p.Name]; ok {
-			return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name)
-		}
-		e.Properties[p.Name] = val
-	}
-	return e, nil
-}
-
-func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
-	val := &pb.Value{ExcludeFromIndexes: noIndex}
-	switch v := iv.(type) {
-	case int:
-		val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
-	case int32:
-		val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
-	case int64:
-		val.ValueType = &pb.Value_IntegerValue{IntegerValue: v}
-	case bool:
-		val.ValueType = &pb.Value_BooleanValue{BooleanValue: v}
-	case string:
-		if len(v) > 1500 && !noIndex {
-			return nil, errors.New("string property too long to index")
-		}
-		if !utf8.ValidString(v) {
-			return nil, fmt.Errorf("string is not valid utf8: %q", v)
-		}
-		val.ValueType = &pb.Value_StringValue{StringValue: v}
-	case float32:
-		val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)}
-	case float64:
-		val.ValueType = &pb.Value_DoubleValue{DoubleValue: v}
-	case *Key:
-		if v == nil {
-			val.ValueType = &pb.Value_NullValue{}
-		} else {
-			val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)}
-		}
-	case GeoPoint:
-		if !v.Valid() {
-			return nil, errors.New("invalid GeoPoint value")
-		}
-		val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{
-			Latitude:  v.Lat,
-			Longitude: v.Lng,
-		}}
-	case time.Time:
-		if v.Before(minTime) || v.After(maxTime) {
-			return nil, errors.New("time value out of range")
-		}
-		val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{
-			Seconds: v.Unix(),
-			Nanos:   int32(v.Nanosecond()),
-		}}
-	case []byte:
-		if len(v) > 1500 && !noIndex {
-			return nil, errors.New("[]byte property too long to index")
-		}
-		val.ValueType = &pb.Value_BlobValue{BlobValue: v}
-	case *Entity:
-		e, err := propertiesToProto(v.Key, v.Properties)
-		if err != nil {
-			return nil, err
-		}
-		val.ValueType = &pb.Value_EntityValue{EntityValue: e}
-	case []interface{}:
-		arr := make([]*pb.Value, 0, len(v))
-		for i, v := range v {
-			elem, err := interfaceToProto(v, noIndex)
-			if err != nil {
-				return nil, fmt.Errorf("%v at index %d", err, i)
-			}
-			arr = append(arr, elem)
-		}
-		val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}}
-		// ArrayValues have ExcludeFromIndexes set on the individual items, rather
-		// than the top-level value.
-		val.ExcludeFromIndexes = false
-	default:
-		if iv != nil {
-			return nil, fmt.Errorf("invalid Value type %T", iv)
-		}
-		val.ValueType = &pb.Value_NullValue{}
-	}
-	// TODO(jbd): Support EntityValue.
-	return val, nil
-}
-
-// isEmptyValue is taken from the encoding/json package in the
-// standard library.
-func isEmptyValue(v reflect.Value) bool {
-	switch v.Kind() {
-	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
-		return v.Len() == 0
-	case reflect.Bool:
-		return !v.Bool()
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		return v.Int() == 0
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-		return v.Uint() == 0
-	case reflect.Float32, reflect.Float64:
-		return v.Float() == 0
-	case reflect.Interface, reflect.Ptr:
-		return v.IsNil()
-	}
-	return false
-}

+ 0 - 41
vendor/cloud.google.com/go/datastore/testdata/index.yaml

@@ -1,41 +0,0 @@
-indexes:
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: T
-  - name: I
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: T
-  - name: I
-    direction: desc
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: I
-  - name: T
-  - name: U
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: I
-  - name: T
-  - name: U
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: T
-  - name: J
-
-- kind: SQChild
-  ancestor: yes
-  properties:
-  - name: T
-  - name: J
-  - name: U

+ 0 - 36
vendor/cloud.google.com/go/datastore/time.go

@@ -1,36 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"math"
-	"time"
-)
-
-var (
-	minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
-	maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
-)
-
-func toUnixMicro(t time.Time) int64 {
-	// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
-	// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
-	// be represented in the numerator of a single int64 divide.
-	return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
-}
-
-func fromUnixMicro(t int64) time.Time {
-	return time.Unix(t/1e6, (t%1e6)*1e3)
-}
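-
-// exampleUnixMicroRoundTrip is an editor's sketch, not part of the original
-// file: a time more than 292 years from 1970 overflows int64 nanoseconds,
-// yet round-trips exactly through the microsecond encoding.
-func exampleUnixMicroRoundTrip() bool {
-	t := time.Date(2500, time.January, 1, 0, 0, 0, 0, time.UTC)
-	return fromUnixMicro(toUnixMicro(t)).Equal(t)
-}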

+ 0 - 310
vendor/cloud.google.com/go/datastore/transaction.go

@@ -1,310 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package datastore
-
-import (
-	"errors"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-
-	pb "google.golang.org/genproto/googleapis/datastore/v1"
-)
-
-// ErrConcurrentTransaction is returned when a transaction is rolled back due
-// to a conflict with a concurrent transaction.
-var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
-
-var errExpiredTransaction = errors.New("datastore: transaction expired")
-
-type transactionSettings struct {
-	attempts int
-}
-
-// newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
-// Unconfigured options will be set to default values.
-func newTransactionSettings(opts []TransactionOption) *transactionSettings {
-	s := &transactionSettings{attempts: 3}
-	for _, o := range opts {
-		o.apply(s)
-	}
-	return s
-}
-
-// TransactionOption configures the way a transaction is executed.
-type TransactionOption interface {
-	apply(*transactionSettings)
-}
-
-// MaxAttempts returns a TransactionOption that overrides the default of 3 attempts.
-func MaxAttempts(attempts int) TransactionOption {
-	return maxAttempts(attempts)
-}
-
-type maxAttempts int
-
-func (w maxAttempts) apply(s *transactionSettings) {
-	if w > 0 {
-		s.attempts = int(w)
-	}
-}
-
-// Transaction represents a set of datastore operations to be committed atomically.
-//
-// Operations are enqueued by calling the Put and Delete methods on Transaction
-// (or their Multi-equivalents).  These operations are only committed when the
-// Commit method is invoked. To ensure consistency, reads must be performed by
-// using Transaction's Get method or by using the Transaction method when
-// building a query.
-//
-// A Transaction must be committed or rolled back exactly once.
-type Transaction struct {
-	id        []byte
-	client    *Client
-	ctx       context.Context
-	mutations []*pb.Mutation      // The mutations to apply.
-	pending   map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion.
-}
-
-// NewTransaction starts a new transaction.
-func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) {
-	for _, o := range opts {
-		if _, ok := o.(maxAttempts); ok {
-			return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
-		}
-	}
-	req := &pb.BeginTransactionRequest{
-		ProjectId: c.dataset,
-	}
-	resp, err := c.client.BeginTransaction(ctx, req)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Transaction{
-		id:        resp.Transaction,
-		ctx:       ctx,
-		client:    c,
-		mutations: nil,
-		pending:   make(map[int]*PendingKey),
-	}, nil
-}
-
-// RunInTransaction runs f in a transaction. f is invoked with a Transaction
-// that f should use for all the transaction's datastore operations.
-//
-// f must not call Commit or Rollback on the provided Transaction.
-//
-// If f returns nil, RunInTransaction commits the transaction,
-// returning the Commit and a nil error if it succeeds. If the commit fails due
-// to a conflicting transaction, RunInTransaction retries f with a new
-// Transaction. It gives up and returns ErrConcurrentTransaction after three
-// failed attempts (or as configured with MaxAttempts).
-//
-// If f returns non-nil, then the transaction will be rolled back and
-// RunInTransaction will return the same error. The function f is not retried.
-//
-// Note that when f returns, the transaction is not committed. Calling code
-// must not assume that any of f's changes have been committed until
-// RunInTransaction returns nil.
-//
-// Since f may be called multiple times, f should usually be idempotent – that
-// is, it should have the same result when called multiple times. Note that
-// Transaction.Get will append when unmarshalling slice fields, so it is not
-// necessarily idempotent.
-func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) {
-	settings := newTransactionSettings(opts)
-	for n := 0; n < settings.attempts; n++ {
-		tx, err := c.NewTransaction(ctx)
-		if err != nil {
-			return nil, err
-		}
-		if err := f(tx); err != nil {
-			tx.Rollback()
-			return nil, err
-		}
-		if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
-			return cmt, err
-		}
-	}
-	return nil, ErrConcurrentTransaction
-}
-
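-// exampleRunInTransaction is an editor's sketch, not part of the original
-// file: an idempotent read-modify-write retried for up to 5 attempts.
-// Counter is hypothetical, and NameKey is assumed to come from the package's
-// key.go (not shown here).
-func exampleRunInTransaction(ctx context.Context, client *Client) error {
-	type Counter struct {
-		N int64
-	}
-	key := NameKey("Counter", "singleton", nil)
-	_, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
-		var c Counter
-		if err := tx.Get(key, &c); err != nil {
-			return err
-		}
-		c.N++
-		_, err := tx.Put(key, &c)
-		return err
-	}, MaxAttempts(5))
-	return err
-}
-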
-// Commit applies the enqueued operations atomically.
-func (t *Transaction) Commit() (*Commit, error) {
-	if t.id == nil {
-		return nil, errExpiredTransaction
-	}
-	req := &pb.CommitRequest{
-		ProjectId:           t.client.dataset,
-		TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id},
-		Mutations:           t.mutations,
-		Mode:                pb.CommitRequest_TRANSACTIONAL,
-	}
-	t.id = nil
-	resp, err := t.client.client.Commit(t.ctx, req)
-	if err != nil {
-		if grpc.Code(err) == codes.Aborted {
-			return nil, ErrConcurrentTransaction
-		}
-		return nil, err
-	}
-
-	// Copy any newly minted keys into the returned keys.
-	commit := &Commit{}
-	for i, p := range t.pending {
-		if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
-			return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
-		}
-		key, err := protoToKey(resp.MutationResults[i].Key)
-		if err != nil {
-			return nil, errors.New("datastore: internal error: server returned an invalid key")
-		}
-		p.key = key
-		p.commit = commit
-	}
-
-	return commit, nil
-}
-
-// Rollback abandons a pending transaction.
-func (t *Transaction) Rollback() error {
-	if t.id == nil {
-		return errExpiredTransaction
-	}
-	id := t.id
-	t.id = nil
-	_, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
-		ProjectId:   t.client.dataset,
-		Transaction: id,
-	})
-	return err
-}
-
-// Get is the transaction-specific version of the package function Get.
-// All reads performed during the transaction will come from a single consistent
-// snapshot. Furthermore, if the transaction is set to a serializable isolation
-// level, another transaction cannot concurrently modify the data that is read
-// or modified by this transaction.
-func (t *Transaction) Get(key *Key, dst interface{}) error {
-	opts := &pb.ReadOptions{
-		ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
-	}
-	err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
-	if me, ok := err.(MultiError); ok {
-		return me[0]
-	}
-	return err
-}
-
-// GetMulti is a batch version of Get.
-func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error {
-	if t.id == nil {
-		return errExpiredTransaction
-	}
-	opts := &pb.ReadOptions{
-		ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
-	}
-	return t.client.get(t.ctx, keys, dst, opts)
-}
-
-// Put is the transaction-specific version of the package function Put.
-//
-// Put returns a PendingKey which can be resolved into a Key using the
-// return value from a successful Commit. If key is an incomplete key, the
-// returned pending key will resolve to a unique key generated by the
-// datastore.
-func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
-	h, err := t.PutMulti([]*Key{key}, []interface{}{src})
-	if err != nil {
-		if me, ok := err.(MultiError); ok {
-			return nil, me[0]
-		}
-		return nil, err
-	}
-	return h[0], nil
-}
-
-// PutMulti is a batch version of Put. One PendingKey is returned for each
-// element of src in the same order.
-func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
-	if t.id == nil {
-		return nil, errExpiredTransaction
-	}
-	mutations, err := putMutations(keys, src)
-	if err != nil {
-		return nil, err
-	}
-	origin := len(t.mutations)
-	t.mutations = append(t.mutations, mutations...)
-
-	// Prepare the returned handles, pre-populating where possible.
-	ret := make([]*PendingKey, len(keys))
-	for i, key := range keys {
-		p := &PendingKey{}
-		if key.Incomplete() {
-			// This key will be in the final commit result.
-			t.pending[origin+i] = p
-		} else {
-			p.key = key
-		}
-		ret[i] = p
-	}
-
-	return ret, nil
-}
-
-// Delete is the transaction-specific version of the package function Delete.
-// Delete enqueues the deletion of the entity for the given key, to be
-// committed atomically upon calling Commit.
-func (t *Transaction) Delete(key *Key) error {
-	err := t.DeleteMulti([]*Key{key})
-	if me, ok := err.(MultiError); ok {
-		return me[0]
-	}
-	return err
-}
-
-// DeleteMulti is a batch version of Delete.
-func (t *Transaction) DeleteMulti(keys []*Key) error {
-	if t.id == nil {
-		return errExpiredTransaction
-	}
-	mutations, err := deleteMutations(keys)
-	if err != nil {
-		return err
-	}
-	t.mutations = append(t.mutations, mutations...)
-	return nil
-}
-
-// Commit represents the result of a committed transaction.
-type Commit struct{}
-
-// Key resolves a pending key handle into a final key.
-func (c *Commit) Key(p *PendingKey) *Key {
-	if c != p.commit {
-		panic("PendingKey was not created by corresponding transaction")
-	}
-	return p.key
-}
-
-// PendingKey represents the key for a newly-inserted entity. It can be
-// resolved into a Key by calling the Key method of Commit.
-type PendingKey struct {
-	key    *Key
-	commit *Commit
-}
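-
-// examplePendingKey is an editor's sketch, not part of the original file: a
-// Put with an incomplete key yields a PendingKey that only resolves after
-// Commit. IncompleteKey is assumed to come from the package's key.go (not
-// shown here); Task is hypothetical.
-func examplePendingKey(ctx context.Context, client *Client) (*Key, error) {
-	type Task struct {
-		Done bool
-	}
-	tx, err := client.NewTransaction(ctx)
-	if err != nil {
-		return nil, err
-	}
-	pk, err := tx.Put(IncompleteKey("Task", nil), &Task{})
-	if err != nil {
-		tx.Rollback()
-		return nil, err
-	}
-	commit, err := tx.Commit()
-	if err != nil {
-		return nil, err
-	}
-	return commit.Key(pk), nil // the datastore-generated key
-}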

+ 0 - 215
vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go

@@ -1,215 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package debugger
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// Controller2CallOptions contains the retry settings for each method of Controller2Client.
-type Controller2CallOptions struct {
-	RegisterDebuggee       []gax.CallOption
-	ListActiveBreakpoints  []gax.CallOption
-	UpdateActiveBreakpoint []gax.CallOption
-}
-
-func defaultController2ClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("clouddebugger.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultController2CallOptions() *Controller2CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &Controller2CallOptions{
-		RegisterDebuggee:       retry[[2]string{"default", "non_idempotent"}],
-		ListActiveBreakpoints:  retry[[2]string{"default", "idempotent"}],
-		UpdateActiveBreakpoint: retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Controller2Client is a client for interacting with Stackdriver Debugger API.
-type Controller2Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	controller2Client clouddebuggerpb.Controller2Client
-
-	// The call options for this service.
-	CallOptions *Controller2CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewController2Client creates a new controller2 client.
-//
-// The Controller service provides the API for orchestrating a collection of
-// debugger agents to perform debugging tasks. These agents are each attached
-// to a process of an application which may include one or more replicas.
-//
-// The debugger agents register with the Controller to identify the application
-// being debugged, the Debuggee. All agents that register with the same data
-// represent the same Debuggee and are assigned the same debuggee_id.
-//
-// The debugger agents call the Controller to retrieve the list of active
-// Breakpoints. Agents with the same debuggee_id get the same breakpoints
-// list. An agent that can fulfill the breakpoint request updates the
-// Controller with the breakpoint result. The controller selects the first
-// result received and discards the rest of the results.
-// Agents that poll again for active breakpoints will no longer have
-// the completed breakpoint in the list and should remove that breakpoint from
-// their attached process.
-//
-// The Controller service does not provide a way to retrieve the results of
-// a completed breakpoint. This functionality is available using the Debugger
-// service.
-func NewController2Client(ctx context.Context, opts ...option.ClientOption) (*Controller2Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultController2ClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Controller2Client{
-		conn:        conn,
-		CallOptions: defaultController2CallOptions(),
-
-		controller2Client: clouddebuggerpb.NewController2Client(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
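-// exampleRegisterDebuggee is an editor's sketch, not part of the generated
-// file: create the client, register a debuggee, and close the connection.
-// The Debuggee field values are hypothetical placeholders.
-func exampleRegisterDebuggee(ctx context.Context) error {
-	c, err := NewController2Client(ctx)
-	if err != nil {
-		return err
-	}
-	defer c.Close()
-	req := &clouddebuggerpb.RegisterDebuggeeRequest{
-		Debuggee: &clouddebuggerpb.Debuggee{
-			Project:      "my-project",
-			AgentVersion: "example/v0.1",
-		},
-	}
-	_, err = c.RegisterDebuggee(ctx, req)
-	return err
-}
-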
-// Connection returns the client's connection to the API service.
-func (c *Controller2Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Controller2Client) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// RegisterDebuggee registers the debuggee with the controller service.
-//
-// All agents attached to the same application must call this method with
-// exactly the same request content to get back the same stable debuggee_id.
-// Agents should call this method again whenever google.rpc.Code.NOT_FOUND
-// is returned from any controller method.
-//
-// This protocol allows the controller service to disable debuggees, recover
-// from data loss, or change the debuggee_id format. Agents must handle
-// debuggee_id value changing upon re-registration.
-func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
-	var resp *clouddebuggerpb.RegisterDebuggeeResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.controller2Client.RegisterDebuggee(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee.
-//
-// The breakpoint specification (location, condition, and expressions
-// fields) is semantically immutable, although the field values may
-// change. For example, an agent may update the location line number
-// to reflect the actual line where the breakpoint was set, but this
-// doesn't change the breakpoint semantics.
-//
-// This means that an agent does not need to check if a breakpoint has changed
-// when it encounters the same breakpoint on a successive call.
-// Moreover, an agent should remember the breakpoints that are completed
-// until the controller removes them from the active list to avoid
-// setting those breakpoints again.
-func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
-	var resp *clouddebuggerpb.ListActiveBreakpointsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateActiveBreakpoint updates the breakpoint state or mutable fields.
-// The entire Breakpoint message must be sent back to the controller service.
-//
-// Updates to active breakpoint fields are only allowed if the new value
-// does not change the breakpoint specification. Updates to the location,
-// condition and expressions fields should not alter the breakpoint
-// semantics. These may only make changes such as canonicalizing a value
-// or snapping the location to the correct line of code.
-func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
-	var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}

+ 0 - 211
vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go

@@ -1,211 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package debugger
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// Debugger2CallOptions contains the retry settings for each method of Debugger2Client.
-type Debugger2CallOptions struct {
-	SetBreakpoint    []gax.CallOption
-	GetBreakpoint    []gax.CallOption
-	DeleteBreakpoint []gax.CallOption
-	ListBreakpoints  []gax.CallOption
-	ListDebuggees    []gax.CallOption
-}
-
-func defaultDebugger2ClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("clouddebugger.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultDebugger2CallOptions() *Debugger2CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &Debugger2CallOptions{
-		SetBreakpoint:    retry[[2]string{"default", "non_idempotent"}],
-		GetBreakpoint:    retry[[2]string{"default", "idempotent"}],
-		DeleteBreakpoint: retry[[2]string{"default", "idempotent"}],
-		ListBreakpoints:  retry[[2]string{"default", "idempotent"}],
-		ListDebuggees:    retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Debugger2Client is a client for interacting with Stackdriver Debugger API.
-type Debugger2Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	debugger2Client clouddebuggerpb.Debugger2Client
-
-	// The call options for this service.
-	CallOptions *Debugger2CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewDebugger2Client creates a new debugger2 client.
-//
-// The Debugger service provides the API that allows users to collect run-time
-// information from a running application, without stopping or slowing it down
-// and without modifying its state.  An application may include one or
-// more replicated processes performing the same work.
-//
-// A debugged application is represented using the Debuggee concept. The
-// Debugger service provides a way to query for available debuggees, but does
-// not provide a way to create one.  A debuggee is created using the Controller
-// service, usually by running a debugger agent with the application.
-//
-// The Debugger service enables the client to set one or more Breakpoints on a
-// Debuggee and collect the results of the set Breakpoints.
-func NewDebugger2Client(ctx context.Context, opts ...option.ClientOption) (*Debugger2Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultDebugger2ClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Debugger2Client{
-		conn:        conn,
-		CallOptions: defaultDebugger2CallOptions(),
-
-		debugger2Client: clouddebuggerpb.NewDebugger2Client(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Debugger2Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Debugger2Client) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// SetBreakpoint sets a breakpoint on the debuggee.
-func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
-	var resp *clouddebuggerpb.SetBreakpointResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.debugger2Client.SetBreakpoint(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// GetBreakpoint gets breakpoint information.
-func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
-	var resp *clouddebuggerpb.GetBreakpointResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.debugger2Client.GetBreakpoint(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteBreakpoint deletes the breakpoint from the debuggee.
-func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.debugger2Client.DeleteBreakpoint(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// ListBreakpoints lists all breakpoints for the debuggee.
-func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
-	var resp *clouddebuggerpb.ListBreakpointsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.debugger2Client.ListBreakpoints(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListDebuggees lists all the debuggees that the user has access to.
-func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
-	var resp *clouddebuggerpb.ListDebuggeesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.debugger2Client.ListDebuggees(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
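
Aside, for anyone auditing this removal: a minimal sketch of how the deleted Debugger2 client was typically driven. The project ID is a placeholder, and the ListDebuggeesRequest field name comes from the v2 proto rather than this diff.

package main

import (
	"fmt"
	"log"

	debugger "cloud.google.com/go/debugger/apiv2"
	"golang.org/x/net/context"
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
)

func main() {
	ctx := context.Background()
	// Dials clouddebugger.googleapis.com:443 with the default auth scopes
	// unless overridden by option.ClientOption arguments.
	c, err := debugger.NewDebugger2Client(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// "my-project" is a placeholder project ID.
	resp, err := c.ListDebuggees(ctx, &clouddebuggerpb.ListDebuggeesRequest{
		Project: "my-project",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range resp.Debuggees {
		fmt.Println(d.Id)
	}
}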

+ 0 - 50
vendor/cloud.google.com/go/debugger/apiv2/doc.go

@@ -1,50 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package debugger is an auto-generated package for the
-// Stackdriver Debugger API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// Examines the call stack and variables of a running application
-// without stopping or slowing it down.
-//
-// Use the client at cloud.google.com/go/cmd/go-cloud-debug-agent in preference to this.
-package debugger // import "cloud.google.com/go/debugger/apiv2"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-		"https://www.googleapis.com/auth/cloud_debugger",
-	}
-}
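
The insertMetadata helper deleted above is identical in every generated package removed by this commit: copy the outgoing gRPC metadata, append the per-client key/value pairs, and re-attach the result. A standalone sketch of that merge, with illustrative keys and values:

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Outgoing metadata already attached by the caller.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("k", "v1"))
	// Per-client pairs, e.g. the x-goog-api-client header.
	extra := metadata.Pairs("k", "v2", "x-goog-api-client", "gl-go/1.9")

	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy() // don't mutate metadata shared with the caller
	for k, v := range extra {
		out[k] = append(out[k], v...)
	}
	ctx = metadata.NewOutgoingContext(ctx, out)

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["k"]) // [v1 v2]: values are appended, not replaced
}

Copying before appending matters: metadata.MD is a plain map, and mutating it in place would leak one client's headers into metadata shared with the caller.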

+ 0 - 437
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go

@@ -1,437 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package dlp
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	"cloud.google.com/go/longrunning"
-	lroauto "cloud.google.com/go/longrunning/autogen"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	longrunningpb "google.golang.org/genproto/googleapis/longrunning"
-	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
-	InspectContent         []gax.CallOption
-	RedactContent          []gax.CallOption
-	DeidentifyContent      []gax.CallOption
-	AnalyzeDataSourceRisk  []gax.CallOption
-	CreateInspectOperation []gax.CallOption
-	ListInspectFindings    []gax.CallOption
-	ListInfoTypes          []gax.CallOption
-	ListRootCategories     []gax.CallOption
-}
-
-func defaultClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("dlp.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &CallOptions{
-		InspectContent:         retry[[2]string{"default", "non_idempotent"}],
-		RedactContent:          retry[[2]string{"default", "non_idempotent"}],
-		DeidentifyContent:      retry[[2]string{"default", "idempotent"}],
-		AnalyzeDataSourceRisk:  retry[[2]string{"default", "idempotent"}],
-		CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
-		ListInspectFindings:    retry[[2]string{"default", "idempotent"}],
-		ListInfoTypes:          retry[[2]string{"default", "idempotent"}],
-		ListRootCategories:     retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Client is a client for interacting with DLP API.
-type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	client dlppb.DlpServiceClient
-
-	// LROClient is used internally to handle longrunning operations.
-	// It is exposed so that its CallOptions can be modified if required.
-	// Users should not Close this client.
-	LROClient *lroauto.OperationsClient
-
-	// The call options for this service.
-	CallOptions *CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClient creates a new dlp service client.
-//
-// The DLP API is a service that allows clients
-// to detect the presence of Personally Identifiable Information (PII) and other
-// privacy-sensitive data in user-supplied, unstructured data streams, like text
-// blocks or images.
-// The service also includes methods for sensitive data redaction and
-// scheduling of data scans on Google Cloud Platform based data sets.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
-
-		client: dlppb.NewDlpServiceClient(conn),
-	}
-	c.setGoogleClientInfo()
-
-	c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
-	if err != nil {
-		// This error "should not happen", since we are just reusing old connection
-		// and never actually need to dial.
-		// If this does happen, we could leak conn. However, we cannot close conn:
-		// If the user invoked the function with option.WithGRPCConn,
-		// we would close a connection that's still in use.
-		// TODO(pongad): investigate error conditions.
-		return nil, err
-	}
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// setGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) setGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ResultPath returns the path for the result resource.
-func ResultPath(result string) string {
-	return "" +
-		"inspect/results/" +
-		result +
-		""
-}
-
-// InspectContent finds potentially sensitive info in a list of strings.
-// This method has limits on input size, processing time, and output size.
-func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
-	var resp *dlppb.InspectContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.InspectContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// RedactContent redacts potentially sensitive info from a list of strings.
-// This method has limits on input size, processing time, and output size.
-func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
-	var resp *dlppb.RedactContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.RedactContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
-// This method has limits on input size and output size.
-func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
-	var resp *dlppb.DeidentifyContentResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
-// Cloud Platform repository.
-func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
-	var resp *longrunningpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return &AnalyzeDataSourceRiskOperation{
-		lro: longrunning.InternalNewOperation(c.LROClient, resp),
-	}, nil
-}
-
-// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
-// repository.
-func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
-	var resp *longrunningpb.Operation
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateInspectOperation(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return &CreateInspectOperationHandle{
-		lro: longrunning.InternalNewOperation(c.LROClient, resp),
-	}, nil
-}
-
-// ListInspectFindings returns the list of results for a given inspect operation result set id.
-func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
-	var resp *dlppb.ListInspectFindingsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListInspectFindings(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListInfoTypes returns the sensitive information types for a given category.
-func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
-	var resp *dlppb.ListInfoTypesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListInfoTypes(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListRootCategories returns the list of root categories of sensitive information.
-func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
-	var resp *dlppb.ListRootCategoriesResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
-type AnalyzeDataSourceRiskOperation struct {
-	lro *longrunning.Operation
-}
-
-// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
-// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
-func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
-	return &AnalyzeDataSourceRiskOperation{
-		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
-	}
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
-	var resp dlppb.RiskAnalysisOperationResult
-	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
-	var resp dlppb.RiskAnalysisOperationResult
-	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
-		return nil, err
-	}
-	if !op.Done() {
-		return nil, nil
-	}
-	return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
-	var meta dlppb.RiskAnalysisOperationMetadata
-	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-	return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *AnalyzeDataSourceRiskOperation) Done() bool {
-	return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *AnalyzeDataSourceRiskOperation) Name() string {
-	return op.lro.Name()
-}
-
-// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
-type CreateInspectOperationHandle struct {
-	lro *longrunning.Operation
-}
-
-// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name.
-// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process.
-func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle {
-	return &CreateInspectOperationHandle{
-		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
-	}
-}
-
-// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
-//
-// See documentation of Poll for error-handling information.
-func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
-	var resp dlppb.InspectOperationResult
-	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
-		return nil, err
-	}
-	return &resp, nil
-}
-
-// Poll fetches the latest state of the long-running operation.
-//
-// Poll also fetches the latest metadata, which can be retrieved by Metadata.
-//
-// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
-// the operation has completed with failure, the error is returned and op.Done will return true.
-// If Poll succeeds and the operation has completed successfully,
-// op.Done will return true, and the response of the operation is returned.
-// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
-func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
-	var resp dlppb.InspectOperationResult
-	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
-		return nil, err
-	}
-	if !op.Done() {
-		return nil, nil
-	}
-	return &resp, nil
-}
-
-// Metadata returns metadata associated with the long-running operation.
-// Metadata itself does not contact the server, but Poll does.
-// To get the latest metadata, call this method after a successful call to Poll.
-// If the metadata is not available, the returned metadata and error are both nil.
-func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) {
-	var meta dlppb.InspectOperationMetadata
-	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
-		return nil, nil
-	} else if err != nil {
-		return nil, err
-	}
-	return &meta, nil
-}
-
-// Done reports whether the long-running operation has completed.
-func (op *CreateInspectOperationHandle) Done() bool {
-	return op.lro.Done()
-}
-
-// Name returns the name of the long-running operation.
-// The name is assigned by the server and is unique within the service from which the operation is created.
-func (op *CreateInspectOperationHandle) Name() string {
-	return op.lro.Name()
-}
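
The two operation wrappers above follow the standard long-running-operation pattern: an RPC returns an operation name, and a handle rebuilt from that name can be polled or waited on, even from a different process. A sketch using only the methods shown in this file (the operation name is a placeholder):

package main

import (
	"log"

	dlp "cloud.google.com/go/dlp/apiv2beta1"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Resume an operation by name. The name is a placeholder; in practice
	// it comes from op.Name() after an earlier AnalyzeDataSourceRisk call,
	// possibly made by another process.
	op := c.AnalyzeDataSourceRiskOperation("operations/example-name")

	// Wait blocks until completion, polling on the 45s interval hard-coded
	// in the generated Wait method above.
	if _, err := op.Wait(ctx); err != nil {
		log.Fatal(err)
	}
	log.Println("risk analysis finished")
}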

+ 0 - 48
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go

@@ -1,48 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package dlp is an auto-generated package for the
-// DLP API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// The Google Data Loss Prevention API provides methods for detection of
-// privacy-sensitive fragments in text, images, and Google Cloud Platform
-// storage repositories.
-package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-	}
-}

+ 0 - 50
vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go

@@ -1,50 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package errorreporting is an auto-generated package for the
-// Stackdriver Error Reporting API.
-//
-//   NOTE: This package is in alpha. It is not stable, and is likely to change.
-//
-// Stackdriver Error Reporting groups and counts similar errors from cloud
-// services. The Stackdriver Error Reporting API provides a way to report new
-// errors and read access to error groups and their associated errors.
-//
-// Use the client at cloud.google.com/go/errorreporting in preference to this.
-package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-	}
-}

+ 0 - 161
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go

@@ -1,161 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package errorreporting
-
-import (
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.
-type ErrorGroupCallOptions struct {
-	GetGroup    []gax.CallOption
-	UpdateGroup []gax.CallOption
-}
-
-func defaultErrorGroupClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultErrorGroupCallOptions() *ErrorGroupCallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &ErrorGroupCallOptions{
-		GetGroup:    retry[[2]string{"default", "idempotent"}],
-		UpdateGroup: retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// ErrorGroupClient is a client for interacting with Stackdriver Error Reporting API.
-type ErrorGroupClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	errorGroupClient clouderrorreportingpb.ErrorGroupServiceClient
-
-	// The call options for this service.
-	CallOptions *ErrorGroupCallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewErrorGroupClient creates a new error group service client.
-//
-// Service for retrieving and updating individual error groups.
-func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*ErrorGroupClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultErrorGroupClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &ErrorGroupClient{
-		conn:        conn,
-		CallOptions: defaultErrorGroupCallOptions(),
-
-		errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *ErrorGroupClient) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *ErrorGroupClient) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ErrorGroupGroupPath returns the path for the group resource.
-func ErrorGroupGroupPath(project, group string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/groups/" +
-		group +
-		""
-}
-
-// GetGroup gets the specified group.
-func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
-	var resp *clouderrorreportingpb.ErrorGroup
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.errorGroupClient.GetGroup(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateGroup replaces the data for the specified group.
-// Fails if the group does not exist.
-func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
-	var resp *clouderrorreportingpb.ErrorGroup
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.errorGroupClient.UpdateGroup(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
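
A usage sketch for the deleted ErrorGroupClient. The project and group IDs are placeholders, and the GroupName request field comes from the v1beta1 proto rather than this diff:

package main

import (
	"fmt"
	"log"

	api "cloud.google.com/go/errorreporting/apiv1beta1"
	"golang.org/x/net/context"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

func main() {
	ctx := context.Background()
	c, err := api.NewErrorGroupClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// ErrorGroupGroupPath renders "projects/<project>/groups/<group>".
	name := api.ErrorGroupGroupPath("my-project", "my-group")
	g, err := c.GetGroup(ctx, &clouderrorreportingpb.GetGroupRequest{
		GroupName: name, // field name per the v1beta1 proto
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(g.Name)
}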

+ 0 - 301
vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go

@@ -1,301 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package errorreporting
-
-import (
-	"math"
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.
-type ErrorStatsCallOptions struct {
-	ListGroupStats []gax.CallOption
-	ListEvents     []gax.CallOption
-	DeleteEvents   []gax.CallOption
-}
-
-func defaultErrorStatsClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultErrorStatsCallOptions() *ErrorStatsCallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &ErrorStatsCallOptions{
-		ListGroupStats: retry[[2]string{"default", "idempotent"}],
-		ListEvents:     retry[[2]string{"default", "idempotent"}],
-		DeleteEvents:   retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// ErrorStatsClient is a client for interacting with Stackdriver Error Reporting API.
-type ErrorStatsClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	errorStatsClient clouderrorreportingpb.ErrorStatsServiceClient
-
-	// The call options for this service.
-	CallOptions *ErrorStatsCallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewErrorStatsClient creates a new error stats service client.
-//
-// An API for retrieving and managing error statistics as well as data for
-// individual events.
-func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*ErrorStatsClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultErrorStatsClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &ErrorStatsClient{
-		conn:        conn,
-		CallOptions: defaultErrorStatsCallOptions(),
-
-		errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *ErrorStatsClient) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *ErrorStatsClient) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ErrorStatsProjectPath returns the path for the project resource.
-func ErrorStatsProjectPath(project string) string {
-	return "" +
-		"projects/" +
-		project +
-		""
-}
-
-// ListGroupStats lists the specified groups.
-func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
-	it := &ErrorGroupStatsIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {
-		var resp *clouderrorreportingpb.ListGroupStatsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.errorStatsClient.ListGroupStats(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.ErrorGroupStats, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// ListEvents lists the specified events.
-func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
-	it := &ErrorEventIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {
-		var resp *clouderrorreportingpb.ListEventsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.errorStatsClient.ListEvents(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.ErrorEvents, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// DeleteEvents deletes all error events of a given project.
-func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
-	var resp *clouderrorreportingpb.DeleteEventsResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.errorStatsClient.DeleteEvents(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ErrorEventIterator manages a stream of *clouderrorreportingpb.ErrorEvent.
-type ErrorEventIterator struct {
-	items    []*clouderrorreportingpb.ErrorEvent
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorEvent, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *ErrorEventIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *ErrorEventIterator) Next() (*clouderrorreportingpb.ErrorEvent, error) {
-	var item *clouderrorreportingpb.ErrorEvent
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *ErrorEventIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *ErrorEventIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// ErrorGroupStatsIterator manages a stream of *clouderrorreportingpb.ErrorGroupStats.
-type ErrorGroupStatsIterator struct {
-	items    []*clouderrorreportingpb.ErrorGroupStats
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorGroupStats, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *ErrorGroupStatsIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *ErrorGroupStatsIterator) Next() (*clouderrorreportingpb.ErrorGroupStats, error) {
-	var item *clouderrorreportingpb.ErrorGroupStats
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *ErrorGroupStatsIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *ErrorGroupStatsIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
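
The iterators above implement the google.golang.org/api/iterator contract: Next returns items one at a time and iterator.Done once the stream is exhausted. A sketch of paging through group stats (ProjectName is the v1beta1 request field; the project ID is a placeholder):

package main

import (
	"fmt"
	"log"

	api "cloud.google.com/go/errorreporting/apiv1beta1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

func main() {
	ctx := context.Background()
	c, err := api.NewErrorStatsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	it := c.ListGroupStats(ctx, &clouderrorreportingpb.ListGroupStatsRequest{
		ProjectName: api.ErrorStatsProjectPath("my-project"),
	})
	for {
		stats, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(stats.Count)
	}
}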

+ 0 - 130
vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go

@@ -1,130 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package errorreporting
-
-import (
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/metadata"
-)
-
-// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.
-type ReportErrorsCallOptions struct {
-	ReportErrorEvent []gax.CallOption
-}
-
-func defaultReportErrorsClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("clouderrorreporting.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultReportErrorsCallOptions() *ReportErrorsCallOptions {
-	retry := map[[2]string][]gax.CallOption{}
-	return &ReportErrorsCallOptions{
-		ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}],
-	}
-}
-
-// ReportErrorsClient is a client for interacting with Stackdriver Error Reporting API.
-type ReportErrorsClient struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	reportErrorsClient clouderrorreportingpb.ReportErrorsServiceClient
-
-	// The call options for this service.
-	CallOptions *ReportErrorsCallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewReportErrorsClient creates a new report errors service client.
-//
-// An API for reporting error events.
-func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*ReportErrorsClient, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultReportErrorsClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &ReportErrorsClient{
-		conn:        conn,
-		CallOptions: defaultReportErrorsCallOptions(),
-
-		reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *ReportErrorsClient) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *ReportErrorsClient) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// ReportErrorsProjectPath returns the path for the project resource.
-func ReportErrorsProjectPath(project string) string {
-	return "" +
-		"projects/" +
-		project +
-		""
-}
-
-// ReportErrorEvent reports an individual error event.
-//
-// This endpoint accepts <strong>either</strong> an OAuth token,
-// <strong>or</strong> an
-// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
-// for authentication. To use an API key, append it to the URL as the value of
-// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
-func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
-	var resp *clouderrorreportingpb.ReportErrorEventResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
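
A sketch of reporting a single event with the deleted ReportErrorsClient, mirroring the request shape that the higher-level errors.go package (removed below) builds internally; all values are placeholders:

package main

import (
	"log"

	api "cloud.google.com/go/errorreporting/apiv1beta1"
	"github.com/golang/protobuf/ptypes"
	"golang.org/x/net/context"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

func main() {
	ctx := context.Background()
	c, err := api.NewReportErrorsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	_, err = c.ReportErrorEvent(ctx, &clouderrorreportingpb.ReportErrorEventRequest{
		ProjectName: api.ReportErrorsProjectPath("my-project"),
		Event: &clouderrorreportingpb.ReportedErrorEvent{
			EventTime: ptypes.TimestampNow(),
			ServiceContext: &clouderrorreportingpb.ServiceContext{
				Service: "my-service",
			},
			// The service expects a message that includes a stack trace,
			// which is why errors.go below appends one before sending.
			Message: "example error message\nwith a stack trace here",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}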

+ 0 - 230
vendor/cloud.google.com/go/errorreporting/errors.go

@@ -1,230 +0,0 @@
-// Copyright 2016 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package errorreporting is a Google Stackdriver Error Reporting library.
-//
-// This package is still experimental and subject to change.
-//
-// See https://cloud.google.com/error-reporting/ for more information.
-package errorreporting // import "cloud.google.com/go/errorreporting"
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"net/http"
-	"runtime"
-	"time"
-
-	api "cloud.google.com/go/errorreporting/apiv1beta1"
-	"cloud.google.com/go/internal/version"
-	"github.com/golang/protobuf/ptypes"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/api/support/bundler"
-	erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
-)
-
-const (
-	userAgent = `gcloud-golang-errorreporting/20160701`
-)
-
-// Config is additional configuration for Client.
-type Config struct {
-	// ServiceName identifies the running program and is included in the error reports.
-	// Optional.
-	ServiceName string
-
-	// ServiceVersion identifies the version of the running program and is
-	// included in the error reports.
-	// Optional.
-	ServiceVersion string
-
-	// OnError is the function to call if any background
-	// tasks errored. By default, errors are logged.
-	OnError func(err error)
-}
-
-// Entry holds information about the reported error.
-type Entry struct {
-	Error error
-	Req   *http.Request // if error is associated with a request.
-	Stack []byte        // if user does not provide a stack trace, runtime.Stack will be called
-}
-
-// Client represents a Google Cloud Error Reporting client.
-type Client struct {
-	projectID      string
-	apiClient      client
-	serviceContext erpb.ServiceContext
-	bundler        *bundler.Bundler
-
-	onErrorFn func(err error)
-}
-
-var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
-	client, err := api.NewReportErrorsClient(ctx, opts...)
-	if err != nil {
-		return nil, err
-	}
-	client.SetGoogleClientInfo("gccl", version.Repo)
-	return client, nil
-}
-
-// NewClient returns a new error reporting client. Generally you will want
-// to create a client on program initialization and use it through the lifetime
-// of the process.
-func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) {
-	if cfg.ServiceName == "" {
-		cfg.ServiceName = "goapp"
-	}
-	c, err := newClient(ctx, opts...)
-	if err != nil {
-		return nil, fmt.Errorf("creating client: %v", err)
-	}
-
-	client := &Client{
-		apiClient: c,
-		projectID: "projects/" + projectID,
-		serviceContext: erpb.ServiceContext{
-			Service: cfg.ServiceName,
-			Version: cfg.ServiceVersion,
-		},
-	}
-	bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) {
-		reqs := bundle.([]*erpb.ReportErrorEventRequest)
-		for _, req := range reqs {
-			_, err = client.apiClient.ReportErrorEvent(ctx, req)
-			if err != nil {
-				client.onError(fmt.Errorf("failed to upload: %v", err))
-			}
-		}
-	})
-	// TODO(jbd): Optimize bundler limits.
-	bundler.DelayThreshold = 2 * time.Second
-	bundler.BundleCountThreshold = 100
-	bundler.BundleByteThreshold = 1000
-	bundler.BundleByteLimit = 1000
-	bundler.BufferedByteLimit = 10000
-	client.bundler = bundler
-	return client, nil
-}
-
-func (c *Client) onError(err error) {
-	if c.onErrorFn != nil {
-		c.onErrorFn(err)
-		return
-	}
-	log.Println(err)
-}
-
-// Close closes any resources held by the client.
-// Close should be called when the client is no longer needed.
-// It need not be called at program exit.
-func (c *Client) Close() error {
-	return c.apiClient.Close()
-}
-
-// Report writes an error report. It doesn't block. Errors in
-// writing the error report can be handled via Client.OnError.
-func (c *Client) Report(e Entry) {
-	var stack string
-	if e.Stack != nil {
-		stack = string(e.Stack)
-	}
-	req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
-	c.bundler.Add(req, 1)
-}
-
-// ReportSync writes an error report. It blocks until the entry is written.
-func (c *Client) ReportSync(ctx context.Context, e Entry) error {
-	var stack string
-	if e.Stack != nil {
-		stack = string(e.Stack)
-	}
-	req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
-	_, err := c.apiClient.ReportErrorEvent(ctx, req)
-	return err
-}
-
-// Flush blocks until all currently buffered error reports are sent.
-//
-// If any errors occurred since the last call to Flush (or since the
-// creation of the client, if this is the first call), Flush reports the
-// error via the (*Client).OnError handler.
-func (c *Client) Flush() {
-	c.bundler.Flush()
-}
-
-func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest {
-	if stack == "" {
-		// limit the stack trace to 16k.
-		var buf [16 * 1024]byte
-		stack = chopStack(buf[0:runtime.Stack(buf[:], false)])
-	}
-	message := msg + "\n" + stack
-
-	var errorContext *erpb.ErrorContext
-	if r != nil {
-		errorContext = &erpb.ErrorContext{
-			HttpRequest: &erpb.HttpRequestContext{
-				Method:    r.Method,
-				Url:       r.Host + r.RequestURI,
-				UserAgent: r.UserAgent(),
-				Referrer:  r.Referer(),
-				RemoteIp:  r.RemoteAddr,
-			},
-		}
-	}
-	return &erpb.ReportErrorEventRequest{
-		ProjectName: c.projectID,
-		Event: &erpb.ReportedErrorEvent{
-			EventTime:      ptypes.TimestampNow(),
-			ServiceContext: &c.serviceContext,
-			Message:        message,
-			Context:        errorContext,
-		},
-	}
-}
-
-// chopStack trims a stack trace so that the function which panics or calls
-// Report is first.
-func chopStack(s []byte) string {
-	f := []byte("cloud.google.com/go/errorreporting.(*Client).Report")
-
-	lfFirst := bytes.IndexByte(s, '\n')
-	if lfFirst == -1 {
-		return string(s)
-	}
-	stack := s[lfFirst:]
-	panicLine := bytes.Index(stack, f)
-	if panicLine == -1 {
-		return string(s)
-	}
-	stack = stack[panicLine+1:]
-	for i := 0; i < 2; i++ {
-		nextLine := bytes.IndexByte(stack, '\n')
-		if nextLine == -1 {
-			return string(s)
-		}
-		stack = stack[nextLine+1:]
-	}
-	return string(s[:lfFirst+1]) + string(stack)
-}
-
-type client interface {
-	ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
-	Close() error
-}
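
For context, usage of the removed errorreporting client followed this general shape. This is a minimal sketch: the NewClient arguments ("my-project", the Config literal and its ServiceName field) and the doWork helper are assumptions about this vintage of the package, not confirmed API.

	ctx := context.Background()
	// Assumed constructor shape for this snapshot of the package.
	ec, err := errorreporting.NewClient(ctx, "my-project", errorreporting.Config{
		ServiceName: "my-service",
	})
	if err != nil {
		// TODO: Handle error.
	}
	defer ec.Close()
	defer ec.Flush() // send any reports still buffered in the bundler

	if err := doWork(); err != nil { // doWork is a hypothetical stand-in
		// Report is non-blocking; upload failures surface via the OnError handler.
		ec.Report(errorreporting.Entry{Error: err})
	}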

+ 0 - 48
vendor/cloud.google.com/go/firestore/apiv1beta1/doc.go

@@ -1,48 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-// Package firestore is an auto-generated package for the
-// Google Cloud Firestore API.
-//
-//   NOTE: This package is in beta. It is not stable, and may be subject to change.
-//
-//
-// Use the client at cloud.google.com/go/firestore in preference to this.
-package firestore // import "cloud.google.com/go/firestore/apiv1beta1"
-
-import (
-	"golang.org/x/net/context"
-	"google.golang.org/grpc/metadata"
-)
-
-func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
-	out, _ := metadata.FromOutgoingContext(ctx)
-	out = out.Copy()
-	for _, md := range mds {
-		for k, v := range md {
-			out[k] = append(out[k], v...)
-		}
-	}
-	return metadata.NewOutgoingContext(ctx, out)
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
-	return []string{
-		"https://www.googleapis.com/auth/cloud-platform",
-		"https://www.googleapis.com/auth/datastore",
-	}
-}

+ 0 - 544
vendor/cloud.google.com/go/firestore/apiv1beta1/firestore_client.go

@@ -1,544 +0,0 @@
-// Copyright 2017, Google LLC All rights reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// AUTO-GENERATED CODE. DO NOT EDIT.
-
-package firestore
-
-import (
-	"math"
-	"time"
-
-	"cloud.google.com/go/internal/version"
-	gax "github.com/googleapis/gax-go"
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-	"google.golang.org/api/option"
-	"google.golang.org/api/transport"
-	firestorepb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-)
-
-// CallOptions contains the retry settings for each method of Client.
-type CallOptions struct {
-	GetDocument       []gax.CallOption
-	ListDocuments     []gax.CallOption
-	CreateDocument    []gax.CallOption
-	UpdateDocument    []gax.CallOption
-	DeleteDocument    []gax.CallOption
-	BatchGetDocuments []gax.CallOption
-	BeginTransaction  []gax.CallOption
-	Commit            []gax.CallOption
-	Rollback          []gax.CallOption
-	RunQuery          []gax.CallOption
-	Write             []gax.CallOption
-	Listen            []gax.CallOption
-	ListCollectionIds []gax.CallOption
-}
-
-func defaultClientOptions() []option.ClientOption {
-	return []option.ClientOption{
-		option.WithEndpoint("firestore.googleapis.com:443"),
-		option.WithScopes(DefaultAuthScopes()...),
-	}
-}
-
-func defaultCallOptions() *CallOptions {
-	retry := map[[2]string][]gax.CallOption{
-		{"default", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-		{"streaming", "idempotent"}: {
-			gax.WithRetry(func() gax.Retryer {
-				return gax.OnCodes([]codes.Code{
-					codes.DeadlineExceeded,
-					codes.Unavailable,
-				}, gax.Backoff{
-					Initial:    100 * time.Millisecond,
-					Max:        60000 * time.Millisecond,
-					Multiplier: 1.3,
-				})
-			}),
-		},
-	}
-	return &CallOptions{
-		GetDocument:       retry[[2]string{"default", "idempotent"}],
-		ListDocuments:     retry[[2]string{"default", "idempotent"}],
-		CreateDocument:    retry[[2]string{"default", "non_idempotent"}],
-		UpdateDocument:    retry[[2]string{"default", "non_idempotent"}],
-		DeleteDocument:    retry[[2]string{"default", "idempotent"}],
-		BatchGetDocuments: retry[[2]string{"streaming", "idempotent"}],
-		BeginTransaction:  retry[[2]string{"default", "idempotent"}],
-		Commit:            retry[[2]string{"default", "non_idempotent"}],
-		Rollback:          retry[[2]string{"default", "idempotent"}],
-		RunQuery:          retry[[2]string{"default", "idempotent"}],
-		Write:             retry[[2]string{"streaming", "non_idempotent"}],
-		Listen:            retry[[2]string{"streaming", "idempotent"}],
-		ListCollectionIds: retry[[2]string{"default", "idempotent"}],
-	}
-}
-
-// Client is a client for interacting with Google Cloud Firestore API.
-type Client struct {
-	// The connection to the service.
-	conn *grpc.ClientConn
-
-	// The gRPC API client.
-	client firestorepb.FirestoreClient
-
-	// The call options for this service.
-	CallOptions *CallOptions
-
-	// The x-goog-* metadata to be sent with each request.
-	xGoogMetadata metadata.MD
-}
-
-// NewClient creates a new firestore client.
-//
-// The Cloud Firestore service.
-//
-// This service exposes several types of comparable timestamps:
-//
-//   create_time - The time at which a document was created. Changes only
-//   when a document is deleted, then re-created. Increases in a strict
-//   monotonic fashion.
-//
-//   update_time - The time at which a document was last updated. Changes
-//   every time a document is modified. Does not change when a write results
-//   in no modifications. Increases in a strict monotonic fashion.
-//
-//   read_time - The time at which a particular state was observed. Used
-//   to denote a consistent snapshot of the database or the time at which a
-//   Document was observed to not exist.
-//
-//   commit_time - The time at which the writes in a transaction were
-//   committed. Any read with an equal or greater read_time is guaranteed
-//   to see the effects of the transaction.
-func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
-	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
-	if err != nil {
-		return nil, err
-	}
-	c := &Client{
-		conn:        conn,
-		CallOptions: defaultCallOptions(),
-
-		client: firestorepb.NewFirestoreClient(conn),
-	}
-	c.SetGoogleClientInfo()
-	return c, nil
-}
-
-// Connection returns the client's connection to the API service.
-func (c *Client) Connection() *grpc.ClientConn {
-	return c.conn
-}
-
-// Close closes the connection to the API service. The user should invoke this when
-// the client is no longer required.
-func (c *Client) Close() error {
-	return c.conn.Close()
-}
-
-// SetGoogleClientInfo sets the name and version of the application in
-// the `x-goog-api-client` header passed on each request. Intended for
-// use by Google-written clients.
-func (c *Client) SetGoogleClientInfo(keyval ...string) {
-	kv := append([]string{"gl-go", version.Go()}, keyval...)
-	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
-	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
-}
-
-// DatabaseRootPath returns the path for the database root resource.
-func DatabaseRootPath(project, database string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/databases/" +
-		database +
-		""
-}
-
-// DocumentRootPath returns the path for the document root resource.
-func DocumentRootPath(project, database string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/databases/" +
-		database +
-		"/documents" +
-		""
-}
-
-// DocumentPathPath returns the path for the document path resource.
-func DocumentPathPath(project, database, documentPath string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/databases/" +
-		database +
-		"/documents/" +
-		documentPath +
-		""
-}
-
-// AnyPathPath returns the path for the any path resource.
-func AnyPathPath(project, database, document, anyPath string) string {
-	return "" +
-		"projects/" +
-		project +
-		"/databases/" +
-		database +
-		"/documents/" +
-		document +
-		"/" +
-		anyPath +
-		""
-}
-
-// GetDocument gets a single document.
-func (c *Client) GetDocument(ctx context.Context, req *firestorepb.GetDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.GetDocument[0:len(c.CallOptions.GetDocument):len(c.CallOptions.GetDocument)], opts...)
-	var resp *firestorepb.Document
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.GetDocument(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListDocuments lists documents.
-func (c *Client) ListDocuments(ctx context.Context, req *firestorepb.ListDocumentsRequest, opts ...gax.CallOption) *DocumentIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListDocuments[0:len(c.CallOptions.ListDocuments):len(c.CallOptions.ListDocuments)], opts...)
-	it := &DocumentIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]*firestorepb.Document, string, error) {
-		var resp *firestorepb.ListDocumentsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListDocuments(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.Documents, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// CreateDocument creates a new document.
-func (c *Client) CreateDocument(ctx context.Context, req *firestorepb.CreateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.CreateDocument[0:len(c.CallOptions.CreateDocument):len(c.CallOptions.CreateDocument)], opts...)
-	var resp *firestorepb.Document
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.CreateDocument(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// UpdateDocument updates or inserts a document.
-func (c *Client) UpdateDocument(ctx context.Context, req *firestorepb.UpdateDocumentRequest, opts ...gax.CallOption) (*firestorepb.Document, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.UpdateDocument[0:len(c.CallOptions.UpdateDocument):len(c.CallOptions.UpdateDocument)], opts...)
-	var resp *firestorepb.Document
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.UpdateDocument(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// DeleteDocument deletes a document.
-func (c *Client) DeleteDocument(ctx context.Context, req *firestorepb.DeleteDocumentRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.DeleteDocument[0:len(c.CallOptions.DeleteDocument):len(c.CallOptions.DeleteDocument)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.DeleteDocument(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// BatchGetDocuments gets multiple documents.
-//
-// Documents returned by this method are not guaranteed to be returned in the
-// same order that they were requested.
-func (c *Client) BatchGetDocuments(ctx context.Context, req *firestorepb.BatchGetDocumentsRequest, opts ...gax.CallOption) (firestorepb.Firestore_BatchGetDocumentsClient, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.BatchGetDocuments[0:len(c.CallOptions.BatchGetDocuments):len(c.CallOptions.BatchGetDocuments)], opts...)
-	var resp firestorepb.Firestore_BatchGetDocumentsClient
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.BatchGetDocuments(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// BeginTransaction starts a new transaction.
-func (c *Client) BeginTransaction(ctx context.Context, req *firestorepb.BeginTransactionRequest, opts ...gax.CallOption) (*firestorepb.BeginTransactionResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.BeginTransaction[0:len(c.CallOptions.BeginTransaction):len(c.CallOptions.BeginTransaction)], opts...)
-	var resp *firestorepb.BeginTransactionResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.BeginTransaction(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// Commit commits a transaction, while optionally updating documents.
-func (c *Client) Commit(ctx context.Context, req *firestorepb.CommitRequest, opts ...gax.CallOption) (*firestorepb.CommitResponse, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.Commit[0:len(c.CallOptions.Commit):len(c.CallOptions.Commit)], opts...)
-	var resp *firestorepb.CommitResponse
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.Commit(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// Rollback rolls back a transaction.
-func (c *Client) Rollback(ctx context.Context, req *firestorepb.RollbackRequest, opts ...gax.CallOption) error {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.Rollback[0:len(c.CallOptions.Rollback):len(c.CallOptions.Rollback)], opts...)
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		_, err = c.client.Rollback(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	return err
-}
-
-// RunQuery runs a query.
-func (c *Client) RunQuery(ctx context.Context, req *firestorepb.RunQueryRequest, opts ...gax.CallOption) (firestorepb.Firestore_RunQueryClient, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.RunQuery[0:len(c.CallOptions.RunQuery):len(c.CallOptions.RunQuery)], opts...)
-	var resp firestorepb.Firestore_RunQueryClient
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.RunQuery(ctx, req, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// Write streams batches of document updates and deletes, in order.
-func (c *Client) Write(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_WriteClient, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.Write[0:len(c.CallOptions.Write):len(c.CallOptions.Write)], opts...)
-	var resp firestorepb.Firestore_WriteClient
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.Write(ctx, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// Listen listens to changes.
-func (c *Client) Listen(ctx context.Context, opts ...gax.CallOption) (firestorepb.Firestore_ListenClient, error) {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.Listen[0:len(c.CallOptions.Listen):len(c.CallOptions.Listen)], opts...)
-	var resp firestorepb.Firestore_ListenClient
-	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-		var err error
-		resp, err = c.client.Listen(ctx, settings.GRPC...)
-		return err
-	}, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return resp, nil
-}
-
-// ListCollectionIds lists all the collection IDs underneath a document.
-func (c *Client) ListCollectionIds(ctx context.Context, req *firestorepb.ListCollectionIdsRequest, opts ...gax.CallOption) *StringIterator {
-	ctx = insertMetadata(ctx, c.xGoogMetadata)
-	opts = append(c.CallOptions.ListCollectionIds[0:len(c.CallOptions.ListCollectionIds):len(c.CallOptions.ListCollectionIds)], opts...)
-	it := &StringIterator{}
-	it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) {
-		var resp *firestorepb.ListCollectionIdsResponse
-		req.PageToken = pageToken
-		if pageSize > math.MaxInt32 {
-			req.PageSize = math.MaxInt32
-		} else {
-			req.PageSize = int32(pageSize)
-		}
-		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
-			var err error
-			resp, err = c.client.ListCollectionIds(ctx, req, settings.GRPC...)
-			return err
-		}, opts...)
-		if err != nil {
-			return nil, "", err
-		}
-		return resp.CollectionIds, resp.NextPageToken, nil
-	}
-	fetch := func(pageSize int, pageToken string) (string, error) {
-		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
-		if err != nil {
-			return "", err
-		}
-		it.items = append(it.items, items...)
-		return nextPageToken, nil
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
-	return it
-}
-
-// DocumentIterator manages a stream of *firestorepb.Document.
-type DocumentIterator struct {
-	items    []*firestorepb.Document
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []*firestorepb.Document, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *DocumentIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *DocumentIterator) Next() (*firestorepb.Document, error) {
-	var item *firestorepb.Document
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *DocumentIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *DocumentIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
-
-// StringIterator manages a stream of string.
-type StringIterator struct {
-	items    []string
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-
-	// InternalFetch is for use by the Google Cloud Libraries only.
-	// It is not part of the stable interface of this package.
-	//
-	// InternalFetch returns results from a single call to the underlying RPC.
-	// The number of results is no greater than pageSize.
-	// If there are no more results, nextPageToken is empty and err is nil.
-	InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *StringIterator) PageInfo() *iterator.PageInfo {
-	return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *StringIterator) Next() (string, error) {
-	var item string
-	if err := it.nextFunc(); err != nil {
-		return item, err
-	}
-	item = it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *StringIterator) bufLen() int {
-	return len(it.items)
-}
-
-func (it *StringIterator) takeBuf() interface{} {
-	b := it.items
-	it.items = nil
-	return b
-}
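
As a sketch of how the generated iterators above are consumed, the pattern is the standard one from google.golang.org/api/iterator; the project and database IDs are placeholders, and firestore here refers to this apiv1beta1 package.

	ctx := context.Background()
	client, err := firestore.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it := client.ListCollectionIds(ctx, &firestorepb.ListCollectionIdsRequest{
		Parent: firestore.DatabaseRootPath("my-project", "(default)"),
	})
	for {
		id, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(id) // a top-level collection ID
	}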

+ 0 - 261
vendor/cloud.google.com/go/firestore/client.go

@@ -1,261 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"strings"
-	"time"
-
-	"google.golang.org/api/iterator"
-
-	vkit "cloud.google.com/go/firestore/apiv1beta1"
-
-	"cloud.google.com/go/internal/version"
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-	"github.com/golang/protobuf/ptypes"
-	"golang.org/x/net/context"
-	"google.golang.org/api/option"
-	"google.golang.org/grpc/metadata"
-)
-
-// resourcePrefixHeader is the name of the metadata header used to indicate
-// the resource being operated on.
-const resourcePrefixHeader = "google-cloud-resource-prefix"
-
-// A Client provides access to the Firestore service.
-type Client struct {
-	c          *vkit.Client
-	projectID  string
-	databaseID string // A client is tied to a single database.
-}
-
-// NewClient creates a new Firestore client that uses the given project.
-func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
-	vc, err := vkit.NewClient(ctx, opts...)
-	if err != nil {
-		return nil, err
-	}
-	vc.SetGoogleClientInfo("gccl", version.Repo)
-	c := &Client{
-		c:          vc,
-		projectID:  projectID,
-		databaseID: "(default)", // always "(default)", for now
-	}
-	return c, nil
-}
-
-// Close closes any resources held by the client.
-//
-// Close need not be called at program exit.
-func (c *Client) Close() error {
-	return c.c.Close()
-}
-
-func (c *Client) path() string {
-	return fmt.Sprintf("projects/%s/databases/%s", c.projectID, c.databaseID)
-}
-
-func withResourceHeader(ctx context.Context, resource string) context.Context {
-	md, _ := metadata.FromOutgoingContext(ctx)
-	md = md.Copy()
-	md[resourcePrefixHeader] = []string{resource}
-	return metadata.NewOutgoingContext(ctx, md)
-}
-
-// Collection creates a reference to a collection with the given path.
-// A path is a sequence of IDs separated by slashes.
-//
-// Collection returns nil if path contains an even number of IDs or any ID is empty.
-func (c *Client) Collection(path string) *CollectionRef {
-	coll, _ := c.idsToRef(strings.Split(path, "/"), c.path())
-	return coll
-}
-
-// Doc creates a reference to a document with the given path.
-// A path is a sequence of IDs separated by slashes.
-//
-// Doc returns nil if path contains an odd number of IDs or any ID is empty.
-func (c *Client) Doc(path string) *DocumentRef {
-	_, doc := c.idsToRef(strings.Split(path, "/"), c.path())
-	return doc
-}
-
-func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *DocumentRef) {
-	if len(IDs) == 0 {
-		return nil, nil
-	}
-	for _, id := range IDs {
-		if id == "" {
-			return nil, nil
-		}
-	}
-	coll := newTopLevelCollRef(c, dbPath, IDs[0])
-	i := 1
-	for i < len(IDs) {
-		doc := newDocRef(coll, IDs[i])
-		i++
-		if i == len(IDs) {
-			return nil, doc
-		}
-		coll = newCollRefWithParent(c, doc, IDs[i])
-		i++
-	}
-	return coll, nil
-}
-
-// GetAll retrieves multiple documents with a single call. The DocumentSnapshots are
-// returned in the order of the given DocumentRefs.
-//
-// If a document is not present, the corresponding DocumentSnapshot will be nil.
-func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) ([]*DocumentSnapshot, error) {
-	if err := checkTransaction(ctx); err != nil {
-		return nil, err
-	}
-	var docNames []string
-	for _, dr := range docRefs {
-		if dr == nil {
-			return nil, errNilDocRef
-		}
-		docNames = append(docNames, dr.Path)
-	}
-	req := &pb.BatchGetDocumentsRequest{
-		Database:  c.path(),
-		Documents: docNames,
-	}
-	streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req)
-	if err != nil {
-		return nil, err
-	}
-
-	// Read results from the stream and add them to a map.
-	docMap := map[string]*pb.Document{}
-	for {
-		res, err := streamClient.Recv()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		switch x := res.Result.(type) {
-		case *pb.BatchGetDocumentsResponse_Found:
-			docMap[x.Found.Name] = x.Found
-
-		case *pb.BatchGetDocumentsResponse_Missing:
-			if docMap[x.Missing] != nil {
-				return nil, fmt.Errorf("firestore: %q both missing and present", x.Missing)
-			}
-			docMap[x.Missing] = nil
-		default:
-			return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
-		}
-	}
-
-	// Put the documents we've gathered in the same order as the requesting slice of
-	// DocumentRefs.
-	docs := make([]*DocumentSnapshot, len(docNames))
-	for i, name := range docNames {
-		pbDoc, ok := docMap[name]
-		if !ok {
-			return nil, fmt.Errorf("firestore: passed %q to BatchGetDocuments but never saw response", name)
-		}
-		if pbDoc != nil {
-			doc, err := newDocumentSnapshot(docRefs[i], pbDoc, c)
-			if err != nil {
-				return nil, err
-			}
-			docs[i] = doc
-		}
-	}
-	return docs, nil
-}
-
-// Collections returns an iterator over the top-level collections.
-func (c *Client) Collections(ctx context.Context) *CollectionIterator {
-	it := &CollectionIterator{
-		err:    checkTransaction(ctx),
-		client: c,
-		it: c.c.ListCollectionIds(
-			withResourceHeader(ctx, c.path()),
-			&pb.ListCollectionIdsRequest{Parent: c.path()}),
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.items) },
-		func() interface{} { b := it.items; it.items = nil; return b })
-	return it
-}
-
-// Batch returns a WriteBatch.
-func (c *Client) Batch() *WriteBatch {
-	return &WriteBatch{c: c}
-}
-
-// commit calls the Commit RPC outside of a transaction.
-func (c *Client) commit(ctx context.Context, ws []*pb.Write) ([]*WriteResult, error) {
-	if err := checkTransaction(ctx); err != nil {
-		return nil, err
-	}
-	req := &pb.CommitRequest{
-		Database: c.path(),
-		Writes:   ws,
-	}
-	res, err := c.c.Commit(withResourceHeader(ctx, req.Database), req)
-	if err != nil {
-		return nil, err
-	}
-	if len(res.WriteResults) == 0 {
-		return nil, errors.New("firestore: missing WriteResult")
-	}
-	var wrs []*WriteResult
-	for _, pwr := range res.WriteResults {
-		wr, err := writeResultFromProto(pwr)
-		if err != nil {
-			return nil, err
-		}
-		wrs = append(wrs, wr)
-	}
-	return wrs, nil
-}
-
-func (c *Client) commit1(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
-	wrs, err := c.commit(ctx, ws)
-	if err != nil {
-		return nil, err
-	}
-	return wrs[0], nil
-}
-
-// A WriteResult is returned by methods that write documents.
-type WriteResult struct {
-	// The time at which the document was updated, or created if it did not
-	// previously exist. Writes that do not actually change the document do
-	// not change the update time.
-	UpdateTime time.Time
-}
-
-func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) {
-	t, err := ptypes.Timestamp(wr.UpdateTime)
-	if err != nil {
-		t = time.Time{}
-		// TODO(jba): Follow up if Delete is supposed to return a nil timestamp.
-	}
-	return &WriteResult{UpdateTime: t}, nil
-}
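
A brief sketch of the GetAll contract documented above: snapshots come back in the order of the requested refs, and in this vintage of the package a missing document yields a nil snapshot rather than an error. The document paths are placeholders.

	refs := []*firestore.DocumentRef{
		client.Doc("States/NewYork"),
		client.Doc("States/Atlantis"), // may not exist
	}
	snaps, err := client.GetAll(ctx, refs)
	if err != nil {
		// TODO: Handle error.
	}
	for i, snap := range snaps {
		if snap == nil {
			fmt.Printf("%s: not found\n", refs[i].Path)
			continue
		}
		fmt.Println(snap.Data())
	}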

+ 0 - 114
vendor/cloud.google.com/go/firestore/collref.go

@@ -1,114 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"math/rand"
-	"os"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-)
-
-// A CollectionRef is a reference to a Firestore collection.
-type CollectionRef struct {
-	c *Client
-
-	// Typically Parent.Path, or c.path if Parent is nil.
-	// May be different if this CollectionRef was created from a stored reference
-	// to a different project/DB.
-	parentPath string
-
-	// Parent is the document of which this collection is a part. It is
-	// nil for top-level collections.
-	Parent *DocumentRef
-
-	// The full resource path of the collection: "projects/P/databases/D/documents..."
-	Path string
-
-	// ID is the collection identifier.
-	ID string
-
-	// Use the methods of Query on a CollectionRef to create and run queries.
-	Query
-}
-
-func newTopLevelCollRef(c *Client, dbPath, id string) *CollectionRef {
-	return &CollectionRef{
-		c:          c,
-		ID:         id,
-		parentPath: dbPath,
-		Path:       dbPath + "/documents/" + id,
-		Query:      Query{c: c, collectionID: id, parentPath: dbPath},
-	}
-}
-
-func newCollRefWithParent(c *Client, parent *DocumentRef, id string) *CollectionRef {
-	return &CollectionRef{
-		c:          c,
-		Parent:     parent,
-		ID:         id,
-		parentPath: parent.Path,
-		Path:       parent.Path + "/" + id,
-		Query:      Query{c: c, collectionID: id, parentPath: parent.Path},
-	}
-}
-
-// Doc returns a DocumentRef that refers to the document in the collection with the
-// given identifier.
-func (c *CollectionRef) Doc(id string) *DocumentRef {
-	if c == nil {
-		return nil
-	}
-	return newDocRef(c, id)
-}
-
-// NewDoc returns a DocumentRef with a uniquely generated ID.
-func (c *CollectionRef) NewDoc() *DocumentRef {
-	return c.Doc(uniqueID())
-}
-
-// Add generates a DocumentRef with a unique ID. It then creates the document
-// with the given data, which can be a map[string]interface{}, a struct or a
-// pointer to a struct.
-//
-// Add returns an error in the unlikely event that a document with the same ID
-// already exists.
-func (c *CollectionRef) Add(ctx context.Context, data interface{}) (*DocumentRef, *WriteResult, error) {
-	d := c.NewDoc()
-	wr, err := d.Create(ctx, data)
-	if err != nil {
-		return nil, nil, err
-	}
-	return d, wr, nil
-}
-
-const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
-
-var (
-	rngMu sync.Mutex
-	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
-)
-
-func uniqueID() string {
-	var b [20]byte
-	rngMu.Lock()
-	for i := 0; i < len(b); i++ {
-		b[i] = alphanum[rng.Intn(len(alphanum))]
-	}
-	rngMu.Unlock()
-	return string(b[:])
-}
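
To illustrate the Add path above: Add mints a random 20-character ID via uniqueID and then issues a Create, so the returned error covers both the (unlikely) ID collision and ordinary write failures. The collection name and field values are placeholders.

	doc, wr, err := client.Collection("Cities").Add(ctx, map[string]interface{}{
		"name": "Springfield",
		"pop":  59000,
	})
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(doc.ID, wr.UpdateTime)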

+ 0 - 214
vendor/cloud.google.com/go/firestore/doc.go

@@ -1,214 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal.
-
-/*
-Package firestore provides a client for reading and writing to a Cloud Firestore
-database.
-
-See https://cloud.google.com/firestore/docs for an introduction
-to Cloud Firestore and additional help on using the Firestore API.
-
-Creating a Client
-
-To start working with this package, create a client with a project ID:
-
-	ctx := context.Background()
-	client, err := firestore.NewClient(ctx, "projectID")
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-CollectionRefs and DocumentRefs
-
-In Firestore, documents are sets of key-value pairs, and collections are groups of
-documents. A Firestore database consists of a hierarchy of alternating collections
-and documents, referred to by slash-separated paths like
-"States/California/Cities/SanFrancisco".
-
-This client is built around references to collections and documents. CollectionRefs
-and DocumentRefs are lightweight values that refer to the corresponding database
-entities. Creating a ref does not involve any network traffic.
-
-	states := client.Collection("States")
-	ny := states.Doc("NewYork")
-	// Or, in a single call:
-	ny = client.Doc("States/NewYork")
-
-Reading
-
-Use DocumentRef.Get to read a document. The result is a DocumentSnapshot.
-Call its Data method to obtain the entire document contents as a map.
-
-	docsnap, err := ny.Get(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	dataMap := docsnap.Data()
-	fmt.Println(dataMap)
-
-You can also obtain a single field with DataAt, or extract the data into a struct
-with DataTo. With the type definition
-
-	type State struct {
-		Capital    string  `firestore:"capital"`
-		Population float64 `firestore:"pop"` // in millions
-	}
-
-we can extract the document's data into a value of type State:
-
-	var nyData State
-	if err := docsnap.DataTo(&nyData); err != nil {
-		// TODO: Handle error.
-	}
-
-Note that this client supports struct tags beginning with "firestore:" that work like
-the tags of the encoding/json package, letting you rename fields, ignore them, or
-omit their values when empty.
-
-To retrieve multiple documents from their references in a single call, use
-Client.GetAll.
-
-	docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
-		states.Doc("Wisconsin"), states.Doc("Ohio"),
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-	for _, ds := range docsnaps {
-		_ = ds // TODO: Use ds.
-	}
-
-Writing
-
-For writing individual documents, use the methods on DocumentReference.
-Create creates a new document.
-
-	wr, err := ny.Create(ctx, State{
-		Capital:    "Albany",
-		Population: 19.8,
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-	fmt.Println(wr)
-
-The first return value is a WriteResult, which contains the time
-at which the document was updated.
-
-Create fails if the document exists. Another method, Set, either replaces an existing
-document or creates a new one.
-
-	ca := states.Doc("California")
-	_, err = ca.Set(ctx, State{
-		Capital:    "Sacramento",
-		Population: 39.14,
-	})
-
-To update some fields of an existing document, use Update. It takes a list of
-paths to update and their corresponding values.
-
-	_, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}})
-
-Use DocumentRef.Delete to delete a document.
-
-	_, err = ny.Delete(ctx)
-
-Preconditions
-
-You can condition Deletes or Updates on when a document was last changed. Specify
-these preconditions as an option to a Delete or Update method. The check and the
-write happen atomically with a single RPC.
-
-	docsnap, err = ca.Get(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	_, err = ca.Update(ctx,
-		[]firestore.Update{{Path: "capital", Value: "Sacramento"}},
-		firestore.LastUpdateTime(docsnap.UpdateTime))
-
-Here we update a doc only if it hasn't changed since we read it.
-You could also do this with a transaction.
-
-To perform multiple writes at once, use a WriteBatch. Its methods chain
-for convenience.
-
-WriteBatch.Commit sends the collected writes to the server, where they happen
-atomically.
-
-	writeResults, err := client.Batch().
-		Create(ny, State{Capital: "Albany"}).
-		Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}).
-		Delete(client.Doc("States/WestDakota")).
-		Commit(ctx)
-
-Queries
-
-You can use queries to select documents from a collection. Begin with the collection, and
-build up a query using Select, Where and other methods of Query.
-
-	q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
-
-Call the Query's Documents method to get an iterator, and use it like
-the other Google Cloud Client iterators.
-
-	iter := q.Documents(ctx)
-	for {
-		doc, err := iter.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			// TODO: Handle error.
-		}
-		fmt.Println(doc.Data())
-	}
-
-To get all the documents in a collection, you can use the collection itself
-as a query.
-
-	iter = client.Collection("States").Documents(ctx)
-
-Transactions
-
-Use a transaction to execute reads and writes atomically. All reads must happen
-before any writes. Transaction creation, commit, rollback and retry are handled for
-you by the Client.RunTransaction method; just provide a function and use the
-read and write methods of the Transaction passed to it.
-
-	ny := client.Doc("States/NewYork")
-	err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
-		doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
-		if err != nil {
-			return err
-		}
-		pop, err := doc.DataAt("pop")
-		if err != nil {
-			return err
-		}
-		return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-
-Authentication
-
-See examples of authorization and authentication at
-https://godoc.org/cloud.google.com/go#pkg-examples.
-*/
-package firestore

+ 0 - 558
vendor/cloud.google.com/go/firestore/docref.go

@@ -1,558 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-
-	vkit "cloud.google.com/go/firestore/apiv1beta1"
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-)
-
-var errNilDocRef = errors.New("firestore: nil DocumentRef")
-
-// A DocumentRef is a reference to a Firestore document.
-type DocumentRef struct {
-	// The CollectionRef that this document is a part of. Never nil.
-	Parent *CollectionRef
-
-	// The full resource path of the document: "projects/P/databases/D/documents..."
-	Path string
-
-	// The ID of the document: the last component of the resource path.
-	ID string
-}
-
-func newDocRef(parent *CollectionRef, id string) *DocumentRef {
-	return &DocumentRef{
-		Parent: parent,
-		ID:     id,
-		Path:   parent.Path + "/" + id,
-	}
-}
-
-// Collection returns a reference to a sub-collection of this document.
-func (d *DocumentRef) Collection(id string) *CollectionRef {
-	return newCollRefWithParent(d.Parent.c, d, id)
-}
-
-// Get retrieves the document. It returns an error if the document does not exist.
-func (d *DocumentRef) Get(ctx context.Context) (*DocumentSnapshot, error) {
-	if err := checkTransaction(ctx); err != nil {
-		return nil, err
-	}
-	if d == nil {
-		return nil, errNilDocRef
-	}
-	doc, err := d.Parent.c.c.GetDocument(withResourceHeader(ctx, d.Parent.c.path()),
-		&pb.GetDocumentRequest{Name: d.Path})
-	// TODO(jba): verify that GetDocument returns NOT_FOUND.
-	if err != nil {
-		return nil, err
-	}
-	return newDocumentSnapshot(d, doc, d.Parent.c)
-}
-
-// Create creates the document with the given data.
-// It returns an error if a document with the same ID already exists.
-//
-// The data argument can be a map with string keys, a struct, or a pointer to a
-// struct. The map keys or exported struct fields become the fields of the Firestore
-// document.
-// The values of data are converted to Firestore values as follows:
-//
-//   - bool converts to Bool.
-//   - string converts to String.
-//   - int, int8, int16, int32 and int64 convert to Integer.
-//   - uint8, uint16 and uint32 convert to Integer. uint64 is disallowed,
-//     because it can represent values that cannot be represented in an int64, which
-//     is the underlying type of an Integer.
-//   - float32 and float64 convert to Double.
-//   - []byte converts to Bytes.
-//   - time.Time converts to Timestamp.
-//   - latlng.LatLng converts to GeoPoint. latlng is the package
-//     "google.golang.org/genproto/googleapis/type/latlng".
-//   - Slices convert to Array.
-//   - Maps and structs convert to Map.
-//   - nils of any type convert to Null.
-//
-// Pointers and interface{} are also permitted, and their elements processed
-// recursively.
-//
-// Struct fields can have tags like those used by the encoding/json package. Tags
-// begin with "firestore:" and are followed by "-", meaning "ignore this field," or
-// an alternative name for the field. Following the name, these comma-separated
-// options may be provided:
-//
-//   - omitempty: Do not encode this field if it is empty. A value is empty
-//     if it is a zero value, or an array, slice or map of length zero.
-//   - serverTimestamp: The field must be of type time.Time. When writing, if
-//     the field has the zero value, the server will populate the stored document with
-//     the time that the request is processed.
-func (d *DocumentRef) Create(ctx context.Context, data interface{}) (*WriteResult, error) {
-	ws, err := d.newCreateWrites(data)
-	if err != nil {
-		return nil, err
-	}
-	return d.Parent.c.commit1(ctx, ws)
-}
-
-func (d *DocumentRef) newCreateWrites(data interface{}) ([]*pb.Write, error) {
-	if d == nil {
-		return nil, errNilDocRef
-	}
-	doc, serverTimestampPaths, err := toProtoDocument(data)
-	if err != nil {
-		return nil, err
-	}
-	doc.Name = d.Path
-	pc, err := exists(false).preconditionProto()
-	if err != nil {
-		return nil, err
-	}
-	return d.newUpdateWithTransform(doc, nil, pc, serverTimestampPaths, false), nil
-}
-
-// Set creates or overwrites the document with the given data. See DocumentRef.Create
-// for the acceptable values of data. Without options, Set overwrites the document
-// completely. Specify one of the Merge options to preserve an existing document's
-// fields.
-func (d *DocumentRef) Set(ctx context.Context, data interface{}, opts ...SetOption) (*WriteResult, error) {
-	ws, err := d.newSetWrites(data, opts)
-	if err != nil {
-		return nil, err
-	}
-	return d.Parent.c.commit1(ctx, ws)
-}
-
-func (d *DocumentRef) newSetWrites(data interface{}, opts []SetOption) ([]*pb.Write, error) {
-	if d == nil {
-		return nil, errNilDocRef
-	}
-	origFieldPaths, allPaths, err := processSetOptions(opts)
-	if err != nil {
-		return nil, err
-	}
-	doc, serverTimestampPaths, err := toProtoDocument(data)
-	if err != nil {
-		return nil, err
-	}
-	if len(origFieldPaths) > 0 {
-		// Keep only data fields corresponding to the given field paths.
-		doc.Fields = applyFieldPaths(doc.Fields, origFieldPaths, nil)
-	}
-	doc.Name = d.Path
-
-	var fieldPaths []FieldPath
-	if allPaths {
-		// MergeAll was passed. Check that the data is a map, and extract its field paths.
-		v := reflect.ValueOf(data)
-		if v.Kind() != reflect.Map {
-			return nil, errors.New("firestore: MergeAll can only be specified with map data")
-		}
-		fieldPaths = fieldPathsFromMap(v, nil)
-	} else if len(origFieldPaths) > 0 {
-		// Remove server timestamp paths that are not in the list of paths to merge.
-		// Note: this is technically O(n^2), but it is unlikely that there is more
-		// than one server timestamp path.
-		serverTimestampPaths = removePathsIf(serverTimestampPaths, func(fp FieldPath) bool {
-			return !fp.in(origFieldPaths)
-		})
-		// Remove server timestamp fields from fieldPaths. Those fields were removed
-		// from the document by toProtoDocument, so they should not be in the update
-		// mask.
-		// Note: this is technically O(n^2), but it is unlikely that there is
-		// more than one server timestamp path.
-		fieldPaths = removePathsIf(origFieldPaths, func(fp FieldPath) bool {
-			return fp.in(serverTimestampPaths)
-		})
-		// Check that all the remaining field paths in the merge option are in the document.
-		for _, fp := range fieldPaths {
-			if _, err := valueAtPath(fp, doc.Fields); err != nil {
-				return nil, err
-			}
-		}
-	}
-	return d.newUpdateWithTransform(doc, fieldPaths, nil, serverTimestampPaths, len(opts) == 0), nil
-}
-
-// Delete deletes the document. If the document doesn't exist, it does nothing
-// and returns no error.
-func (d *DocumentRef) Delete(ctx context.Context, preconds ...Precondition) (*WriteResult, error) {
-	ws, err := d.newDeleteWrites(preconds)
-	if err != nil {
-		return nil, err
-	}
-	return d.Parent.c.commit1(ctx, ws)
-}
-
-// Create a new map that contains only the field paths in fps.
-func applyFieldPaths(fields map[string]*pb.Value, fps []FieldPath, root FieldPath) map[string]*pb.Value {
-	r := map[string]*pb.Value{}
-	for k, v := range fields {
-		kpath := root.with(k)
-		if kpath.in(fps) {
-			r[k] = v
-		} else if mv := v.GetMapValue(); mv != nil {
-			if m2 := applyFieldPaths(mv.Fields, fps, kpath); m2 != nil {
-				r[k] = &pb.Value{&pb.Value_MapValue{&pb.MapValue{m2}}}
-			}
-		}
-	}
-	if len(r) == 0 {
-		return nil
-	}
-	return r
-}
-
-func fieldPathsFromMap(vmap reflect.Value, prefix FieldPath) []FieldPath {
-	// vmap is a map and its keys are strings.
-	// Each map key denotes a field; no splitting or escaping.
-	var fps []FieldPath
-	for _, k := range vmap.MapKeys() {
-		v := vmap.MapIndex(k)
-		fp := prefix.with(k.String())
-		if vm := extractMap(v); vm.IsValid() {
-			fps = append(fps, fieldPathsFromMap(vm, fp)...)
-		} else if v.Interface() != ServerTimestamp {
-			// ServerTimestamp fields do not go into the update mask.
-			fps = append(fps, fp)
-		}
-	}
-	return fps
-}
-
-func extractMap(v reflect.Value) reflect.Value {
-	switch v.Kind() {
-	case reflect.Map:
-		return v
-	case reflect.Interface:
-		return extractMap(v.Elem())
-	default:
-		return reflect.Value{}
-	}
-}
-
-// removePathsIf creates a new slice of FieldPaths that contains
-// exactly those elements of fps for which pred returns false.
-func removePathsIf(fps []FieldPath, pred func(FieldPath) bool) []FieldPath {
-	var result []FieldPath
-	for _, fp := range fps {
-		if !pred(fp) {
-			result = append(result, fp)
-		}
-	}
-	return result
-}
-
-func (d *DocumentRef) newDeleteWrites(preconds []Precondition) ([]*pb.Write, error) {
-	if d == nil {
-		return nil, errNilDocRef
-	}
-	pc, err := processPreconditionsForDelete(preconds)
-	if err != nil {
-		return nil, err
-	}
-	return []*pb.Write{{
-		Operation:       &pb.Write_Delete{d.Path},
-		CurrentDocument: pc,
-	}}, nil
-}
-
-func (d *DocumentRef) newUpdatePathWrites(updates []Update, preconds []Precondition) ([]*pb.Write, error) {
-	if len(updates) == 0 {
-		return nil, errors.New("firestore: no paths to update")
-	}
-	var fps []FieldPath
-	var fpvs []fpv
-	for _, u := range updates {
-		v, err := u.process()
-		if err != nil {
-			return nil, err
-		}
-		fps = append(fps, v.fieldPath)
-		fpvs = append(fpvs, v)
-	}
-	if err := checkNoDupOrPrefix(fps); err != nil {
-		return nil, err
-	}
-	m := createMapFromUpdates(fpvs)
-	return d.newUpdateWrites(m, fps, preconds)
-}
-
-// newUpdateWrites creates Write operations for an update.
-func (d *DocumentRef) newUpdateWrites(data interface{}, fieldPaths []FieldPath, preconds []Precondition) ([]*pb.Write, error) {
-	if d == nil {
-		return nil, errNilDocRef
-	}
-	pc, err := processPreconditionsForUpdate(preconds)
-	if err != nil {
-		return nil, err
-	}
-	doc, serverTimestampPaths, err := toProtoDocument(data)
-	if err != nil {
-		return nil, err
-	}
-	doc.Name = d.Path
-	return d.newUpdateWithTransform(doc, fieldPaths, pc, serverTimestampPaths, false), nil
-}
-
-var requestTimeTransform = &pb.DocumentTransform_FieldTransform_SetToServerValue{
-	pb.DocumentTransform_FieldTransform_REQUEST_TIME,
-}
-
-// newUpdateWithTransform constructs operations for a commit. Most generally, it
-// returns an update operation followed by a transform.
-//
-// If there are no serverTimestampPaths, the transform is omitted.
-//
-// If doc.Fields is empty, there are no updatePaths, and there is no precondition,
-// the update is omitted, unless updateOnEmpty is true.
-func (d *DocumentRef) newUpdateWithTransform(doc *pb.Document, updatePaths []FieldPath, pc *pb.Precondition, serverTimestampPaths []FieldPath, updateOnEmpty bool) []*pb.Write {
-	// Remove server timestamp fields from updatePaths. Those fields were removed
-	// from the document by toProtoDocument, so they should not be in the update
-	// mask.
-	// Note: this is technically O(n^2), but it is unlikely that there is
-	// more than one server timestamp path.
-	updatePaths = removePathsIf(updatePaths, func(fp FieldPath) bool {
-		return fp.in(serverTimestampPaths)
-	})
-	var ws []*pb.Write
-	if updateOnEmpty || len(doc.Fields) > 0 ||
-		len(updatePaths) > 0 || (pc != nil && len(serverTimestampPaths) == 0) {
-		var mask *pb.DocumentMask
-		if len(updatePaths) > 0 {
-			sfps := toServiceFieldPaths(updatePaths)
-			sort.Strings(sfps) // TODO(jba): make tests pass without this
-			mask = &pb.DocumentMask{FieldPaths: sfps}
-		}
-		w := &pb.Write{
-			Operation:       &pb.Write_Update{doc},
-			UpdateMask:      mask,
-			CurrentDocument: pc,
-		}
-		ws = append(ws, w)
-		pc = nil // If the precondition is in the write, we don't need it in the transform.
-	}
-	if len(serverTimestampPaths) > 0 || pc != nil {
-		ws = append(ws, d.newTransform(serverTimestampPaths, pc))
-	}
-	return ws
-}
-
-func (d *DocumentRef) newTransform(serverTimestampFieldPaths []FieldPath, pc *pb.Precondition) *pb.Write {
-	sort.Sort(byPath(serverTimestampFieldPaths)) // TODO(jba): make tests pass without this
-	var fts []*pb.DocumentTransform_FieldTransform
-	for _, p := range serverTimestampFieldPaths {
-		fts = append(fts, &pb.DocumentTransform_FieldTransform{
-			FieldPath:     p.toServiceFieldPath(),
-			TransformType: requestTimeTransform,
-		})
-	}
-	return &pb.Write{
-		Operation: &pb.Write_Transform{
-			&pb.DocumentTransform{
-				Document:        d.Path,
-				FieldTransforms: fts,
-				// TODO(jba): should the transform have the same preconditions as the write?
-			},
-		},
-		CurrentDocument: pc,
-	}
-}
-
-type sentinel int
-
-const (
-	// Delete is used as a value in a call to UpdateMap to indicate that the
-	// corresponding key should be deleted.
-	Delete sentinel = iota
-
-	// ServerTimestamp is used as a value in a call to UpdateMap to indicate that the
-	// key's value should be set to the time at which the server processed
-	// the request.
-	ServerTimestamp
-)
-
-func (s sentinel) String() string {
-	switch s {
-	case Delete:
-		return "Delete"
-	case ServerTimestamp:
-		return "ServerTimestamp"
-	default:
-		return "<?sentinel?>"
-	}
-}
-
-func isStructOrStructPtr(x interface{}) bool {
-	v := reflect.ValueOf(x)
-	if v.Kind() == reflect.Struct {
-		return true
-	}
-	if v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct {
-		return true
-	}
-	return false
-}
-
-// An Update describes an update to a value referred to by a path.
-// An Update should have either a non-empty Path or a non-empty FieldPath,
-// but not both.
-//
-// See DocumentRef.Create for acceptable values.
-// To delete a field, specify firestore.Delete as the value.
-type Update struct {
-	Path      string // Will be split on dots, and must not contain any of "~*/[]".
-	FieldPath FieldPath
-	Value     interface{}
-}
-
-// An fpv is a pair of validated FieldPath and value.
-type fpv struct {
-	fieldPath FieldPath
-	value     interface{}
-}
-
-func (u *Update) process() (v fpv, err error) {
-	if (u.Path != "") == (u.FieldPath != nil) {
-		return v, fmt.Errorf("firestore: update %+v should have exactly one of Path or FieldPath", u)
-	}
-	fp := u.FieldPath
-	if fp == nil {
-		fp, err = parseDotSeparatedString(u.Path)
-		if err != nil {
-			return v, err
-		}
-	}
-	if err := fp.validate(); err != nil {
-		return v, err
-	}
-	return fpv{fp, u.Value}, nil
-}
-
-// Update updates the document. The values at the given
-// field paths are replaced, but other fields of the stored document are untouched.
-func (d *DocumentRef) Update(ctx context.Context, updates []Update, preconds ...Precondition) (*WriteResult, error) {
-	ws, err := d.newUpdatePathWrites(updates, preconds)
-	if err != nil {
-		return nil, err
-	}
-	return d.Parent.c.commit1(ctx, ws)
-}
-
-// Collections returns an iterator over the immediate sub-collections of the document.
-func (d *DocumentRef) Collections(ctx context.Context) *CollectionIterator {
-	client := d.Parent.c
-	it := &CollectionIterator{
-		err:    checkTransaction(ctx),
-		client: client,
-		parent: d,
-		it: client.c.ListCollectionIds(
-			withResourceHeader(ctx, client.path()),
-			&pb.ListCollectionIdsRequest{Parent: d.Path}),
-	}
-	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
-		it.fetch,
-		func() int { return len(it.items) },
-		func() interface{} { b := it.items; it.items = nil; return b })
-	return it
-}
-
-// CollectionIterator is an iterator over sub-collections of a document.
-type CollectionIterator struct {
-	client   *Client
-	parent   *DocumentRef
-	it       *vkit.StringIterator
-	pageInfo *iterator.PageInfo
-	nextFunc func() error
-	items    []*CollectionRef
-	err      error
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *CollectionIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
-
-// Next returns the next result. Its second return value is iterator.Done if there
-// are no more results. Once Next returns Done, all subsequent calls will return
-// Done.
-func (it *CollectionIterator) Next() (*CollectionRef, error) {
-	if err := it.nextFunc(); err != nil {
-		return nil, err
-	}
-	item := it.items[0]
-	it.items = it.items[1:]
-	return item, nil
-}
-
-func (it *CollectionIterator) fetch(pageSize int, pageToken string) (string, error) {
-	if it.err != nil {
-		return "", it.err
-	}
-	return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
-		id, err := it.it.Next()
-		if err != nil {
-			return err
-		}
-		var cr *CollectionRef
-		if it.parent == nil {
-			cr = newTopLevelCollRef(it.client, it.client.path(), id)
-		} else {
-			cr = newCollRefWithParent(it.client, it.parent, id)
-		}
-		it.items = append(it.items, cr)
-		return nil
-	})
-}
-
-// GetAll returns all the collections remaining from the iterator.
-func (it *CollectionIterator) GetAll() ([]*CollectionRef, error) {
-	var crs []*CollectionRef
-	for {
-		cr, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		crs = append(crs, cr)
-	}
-	return crs, nil
-}
-
-// Common fetch code for iterators that are backed by vkit iterators.
-// TODO(jba): dedup with same function in logging/logadmin.
-func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) {
-	pi.MaxSize = pageSize
-	pi.Token = pageToken
-	// Get one item, which will fill the buffer.
-	if err := next(); err != nil {
-		return "", err
-	}
-	// Collect the rest of the buffer.
-	for pi.Remaining() > 0 {
-		if err := next(); err != nil {
-			return "", err
-		}
-	}
-	return pi.Token, nil
-}

+ 0 - 261
vendor/cloud.google.com/go/firestore/document.go

@@ -1,261 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"time"
-
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-	"github.com/golang/protobuf/ptypes"
-)
-
-// A DocumentSnapshot contains document data and metadata.
-type DocumentSnapshot struct {
-	// The DocumentRef for this document.
-	Ref *DocumentRef
-
-	// Read-only. The time at which the document was created.
-	// Increases monotonically when a document is deleted then
-	// recreated. It can also be compared to values from other documents and
-	// the read time of a query.
-	CreateTime time.Time
-
-	// Read-only. The time at which the document was last changed. This value
-	// is initially set to CreateTime, then increases monotonically with each
-	// change to the document. It can also be compared to values from other
-	// documents and the read time of a query.
-	UpdateTime time.Time
-
-	c     *Client
-	proto *pb.Document
-}
-
-// Data returns the DocumentSnapshot's fields as a map.
-// It is equivalent to
-//     var m map[string]interface{}
-//     d.DataTo(&m)
-func (d *DocumentSnapshot) Data() map[string]interface{} {
-	m, err := createMapFromValueMap(d.proto.Fields, d.c)
-	// Any error here is a bug in the client.
-	if err != nil {
-		panic(fmt.Sprintf("firestore: %v", err))
-	}
-	return m
-}
-
-// DataTo uses the document's fields to populate p, which can be a pointer to a
-// map[string]interface{} or a pointer to a struct.
-//
-// Firestore field values are converted to Go values as follows:
-//   - Null converts to nil.
-//   - Bool converts to bool.
-//   - String converts to string.
-//   - Integer converts to int64. When setting a struct field, any signed or unsigned
-//     integer type is permitted except uint64. Overflow is detected and results in
-//     an error.
-//   - Double converts to float64. When setting a struct field, float32 is permitted.
-//     Overflow is detected and results in an error.
-//   - Bytes is converted to []byte.
-//   - Timestamp converts to time.Time.
-//   - GeoPoint converts to latlng.LatLng, where latlng is the package
-//     "google.golang.org/genproto/googleapis/type/latlng".
-//   - Arrays convert to []interface{}. When setting a struct field, the field
-//     may be a slice or array of any type and is populated recursively.
-//     Slices are resized to the incoming value's size, while arrays that are too
-//     long have excess elements filled with zero values. If the array is too short,
-//     excess incoming values will be dropped.
-//   - Maps convert to map[string]interface{}. When setting a struct field,
-//     maps of key type string and any value type are permitted, and are populated
-//     recursively.
-//   - References are converted to DocumentRefs.
-//
-// Field names given by struct field tags are observed, as described in
-// DocumentRef.Create.
-func (d *DocumentSnapshot) DataTo(p interface{}) error {
-	return setFromProtoValue(p, &pb.Value{&pb.Value_MapValue{&pb.MapValue{d.proto.Fields}}}, d.c)
-}
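// An illustrative sketch, reusing the State type defined in
// internal/doc-snippets.go below:
//
//	var s State
//	if err := docsnap.DataTo(&s); err != nil {
//		// TODO: Handle error.
//	}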
-
-// DataAt returns the data value denoted by path.
-//
-// The path argument can be a single field or a dot-separated sequence of
-// fields, and must not contain any of the runes "~*/[]". Use DataAtPath instead for
-// such a path.
-//
-// See DocumentSnapshot.DataTo for how Firestore values are converted to Go values.
-func (d *DocumentSnapshot) DataAt(path string) (interface{}, error) {
-	fp, err := parseDotSeparatedString(path)
-	if err != nil {
-		return nil, err
-	}
-	return d.DataAtPath(fp)
-}
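// An illustrative sketch (field names assumed):
//
//	v, err := docsnap.DataAt("address.city") // the dot-separated form of FieldPath{"address", "city"}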
-
-// DataAtPath returns the data value denoted by the FieldPath fp.
-func (d *DocumentSnapshot) DataAtPath(fp FieldPath) (interface{}, error) {
-	v, err := valueAtPath(fp, d.proto.Fields)
-	if err != nil {
-		return nil, err
-	}
-	return createFromProtoValue(v, d.c)
-}
-
-// valueAtPath returns the value of m referred to by fp.
-func valueAtPath(fp FieldPath, m map[string]*pb.Value) (*pb.Value, error) {
-	for _, k := range fp[:len(fp)-1] {
-		v := m[k]
-		if v == nil {
-			return nil, fmt.Errorf("firestore: no field %q", k)
-		}
-		mv := v.GetMapValue()
-		if mv == nil {
-			return nil, fmt.Errorf("firestore: value for field %q is not a map", k)
-		}
-		m = mv.Fields
-	}
-	k := fp[len(fp)-1]
-	v := m[k]
-	if v == nil {
-		return nil, fmt.Errorf("firestore: no field %q", k)
-	}
-	return v, nil
-}
-
-// toProtoDocument converts a Go value to a Document proto.
-// Valid values are: map[string]T, struct, or pointer to a valid value.
-// It also returns a list of field paths for DocumentTransform (server timestamp).
-func toProtoDocument(x interface{}) (*pb.Document, []FieldPath, error) {
-	if x == nil {
-		return nil, nil, errors.New("firestore: nil document contents")
-	}
-	v := reflect.ValueOf(x)
-	pv, sawTransform, err := toProtoValue(v)
-	if err != nil {
-		return nil, nil, err
-	}
-	var fieldPaths []FieldPath
-	if sawTransform {
-		fieldPaths, err = extractTransformPaths(v, nil)
-		if err != nil {
-			return nil, nil, err
-		}
-	}
-	var fields map[string]*pb.Value
-	if pv != nil {
-		m := pv.GetMapValue()
-		if m == nil {
-			return nil, nil, fmt.Errorf("firestore: cannot convert value of type %T into a map", x)
-		}
-		fields = m.Fields
-	}
-	return &pb.Document{Fields: fields}, fieldPaths, nil
-}
-
-func extractTransformPaths(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
-	switch v.Kind() {
-	case reflect.Map:
-		return extractTransformPathsFromMap(v, prefix)
-	case reflect.Struct:
-		return extractTransformPathsFromStruct(v, prefix)
-	case reflect.Ptr:
-		if v.IsNil() {
-			return nil, nil
-		}
-		return extractTransformPaths(v.Elem(), prefix)
-	case reflect.Interface:
-		if v.NumMethod() == 0 { // empty interface: recurse on its contents
-			return extractTransformPaths(v.Elem(), prefix)
-		}
-		return nil, nil
-	default:
-		return nil, nil
-	}
-}
-
-func extractTransformPathsFromMap(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
-	var paths []FieldPath
-	for _, k := range v.MapKeys() {
-		sk := k.Interface().(string) // assume keys are strings; checked in toProtoValue
-		path := prefix.with(sk)
-		mi := v.MapIndex(k)
-		if mi.Interface() == ServerTimestamp {
-			paths = append(paths, path)
-		} else {
-			ps, err := extractTransformPaths(mi, path)
-			if err != nil {
-				return nil, err
-			}
-			paths = append(paths, ps...)
-		}
-	}
-	return paths, nil
-}
-
-func extractTransformPathsFromStruct(v reflect.Value, prefix FieldPath) ([]FieldPath, error) {
-	var paths []FieldPath
-	fields, err := fieldCache.Fields(v.Type())
-	if err != nil {
-		return nil, err
-	}
-	for _, f := range fields {
-		fv := v.FieldByIndex(f.Index)
-		path := prefix.with(f.Name)
-		opts := f.ParsedTag.(tagOptions)
-		if opts.serverTimestamp {
-			var isZero bool
-			switch f.Type {
-			case typeOfGoTime:
-				isZero = fv.Interface().(time.Time).IsZero()
-			case reflect.PtrTo(typeOfGoTime):
-				isZero = fv.IsNil() || fv.Elem().Interface().(time.Time).IsZero()
-			default:
-				return nil, fmt.Errorf("firestore: field %s of struct %s with serverTimestamp tag must be of type time.Time or *time.Time",
-					f.Name, v.Type())
-			}
-			if isZero {
-				paths = append(paths, path)
-			}
-		} else {
-			ps, err := extractTransformPaths(fv, path)
-			if err != nil {
-				return nil, err
-			}
-			paths = append(paths, ps...)
-		}
-	}
-	return paths, nil
-}
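// An illustrative sketch of a struct the code above inspects; the type and
// field names here are assumptions:
//
//	type Doc struct {
//		Name     string    `firestore:"name"`
//		Modified time.Time `firestore:"modified,serverTimestamp"`
//	}
//
// A zero Modified value is collected as a transform path and filled in by
// the server, rather than being written as data.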
-
-func newDocumentSnapshot(ref *DocumentRef, proto *pb.Document, c *Client) (*DocumentSnapshot, error) {
-	d := &DocumentSnapshot{
-		Ref:   ref,
-		c:     c,
-		proto: proto,
-	}
-	ts, err := ptypes.Timestamp(proto.CreateTime)
-	if err != nil {
-		return nil, err
-	}
-	d.CreateTime = ts
-	ts, err = ptypes.Timestamp(proto.UpdateTime)
-	if err != nil {
-		return nil, err
-	}
-	d.UpdateTime = ts
-	return d, nil
-}

+ 0 - 210
vendor/cloud.google.com/go/firestore/fieldpath.go

@@ -1,210 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"regexp"
-	"sort"
-	"strings"
-)
-
-// A FieldPath is a non-empty sequence of non-empty fields that reference a value.
-//
-// A FieldPath value should only be necessary if one of the field names contains
-// one of the runes ".~*/[]". Most methods accept a simpler form of field path
-// as a string in which the individual fields are separated by dots.
-// For example,
-//   []string{"a", "b"}
-// is equivalent to the string form
-//   "a.b"
-// but
-//   []string{"*"}
-// has no equivalent string form.
-type FieldPath []string
-
-// parseDotSeparatedString constructs a FieldPath from a string that separates
-// path components with dots. Other than splitting at dots and checking for invalid
-// characters, it ignores everything else about the string,
-// including attempts to quote field path components. So "a.`b.c`.d" is parsed into
-// four parts, "a", "`b", "c`" and "d".
-func parseDotSeparatedString(s string) (FieldPath, error) {
-	const invalidRunes = "~*/[]"
-	if strings.ContainsAny(s, invalidRunes) {
-		return nil, fmt.Errorf("firestore: %q contains an invalid rune (one of %s)", s, invalidRunes)
-	}
-	fp := FieldPath(strings.Split(s, "."))
-	if err := fp.validate(); err != nil {
-		return nil, err
-	}
-	return fp, nil
-}
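// For example, per the comment above, quoting is not interpreted:
//
//	fp, _ := parseDotSeparatedString("a.`b.c`.d")
//	// fp == FieldPath{"a", "`b", "c`", "d"}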
-
-func (fp1 FieldPath) equal(fp2 FieldPath) bool {
-	if len(fp1) != len(fp2) {
-		return false
-	}
-	for i, c1 := range fp1 {
-		if c1 != fp2[i] {
-			return false
-		}
-	}
-	return true
-}
-
-func (fp1 FieldPath) prefixOf(fp2 FieldPath) bool {
-	return len(fp1) <= len(fp2) && fp1.equal(fp2[:len(fp1)])
-}
-
-// Lexicographic ordering.
-func (fp1 FieldPath) less(fp2 FieldPath) bool {
-	for i := range fp1 {
-		switch {
-		case i >= len(fp2):
-			return false
-		case fp1[i] < fp2[i]:
-			return true
-		case fp1[i] > fp2[i]:
-			return false
-		}
-	}
-	// fp1 and fp2 are equal up to len(fp1).
-	return len(fp1) < len(fp2)
-}
-
-// validate checks the validity of fp and returns an error if it is invalid.
-func (fp FieldPath) validate() error {
-	if len(fp) == 0 {
-		return errors.New("firestore: empty field path")
-	}
-	for _, c := range fp {
-		if len(c) == 0 {
-			return errors.New("firestore: empty component in field path")
-		}
-	}
-	return nil
-}
-
-// with creates a new FieldPath consisting of fp followed by k.
-func (fp FieldPath) with(k string) FieldPath {
-	r := make(FieldPath, len(fp), len(fp)+1)
-	copy(r, fp)
-	return append(r, k)
-}
-
-// in reports whether fp is equal to one of the fps.
-func (fp FieldPath) in(fps []FieldPath) bool {
-	for _, e := range fps {
-		if fp.equal(e) {
-			return true
-		}
-	}
-	return false
-}
-
-// checkNoDupOrPrefix checks whether any FieldPath is a prefix of (or equal to)
-// another.
-// It modifies the order of FieldPaths in its argument (via sorting).
-func checkNoDupOrPrefix(fps []FieldPath) error {
-	// Sort fps lexicographically.
-	sort.Sort(byPath(fps))
-	// Check adjacent pairs for prefix.
-	for i := 1; i < len(fps); i++ {
-		if fps[i-1].prefixOf(fps[i]) {
-			return fmt.Errorf("field path %v cannot be used in the same update as %v", fps[i-1], fps[i])
-		}
-	}
-	return nil
-}
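// For example, {"a"} is a prefix of {"a", "b"}, so this pair is rejected:
//
//	err := checkNoDupOrPrefix([]FieldPath{{"a", "b"}, {"a"}})
//	// err: field path [a] cannot be used in the same update as [a b]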
-
-type byPath []FieldPath
-
-func (b byPath) Len() int           { return len(b) }
-func (b byPath) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-func (b byPath) Less(i, j int) bool { return b[i].less(b[j]) }
-
-// createMapFromUpdates uses a list of updates to construct a valid
-// Firestore data value in the form of a map. It assumes the FieldPaths in the updates
-// already been validated and checked for prefixes. If any field path is associated
-// with the Delete value, it is not stored in the map.
-func createMapFromUpdates(fpvs []fpv) map[string]interface{} {
-	m := map[string]interface{}{}
-	for _, v := range fpvs {
-		if v.value != Delete {
-			setAtPath(m, v.fieldPath, v.value)
-		}
-	}
-	return m
-}
-
-// setAtPath sets val at the location in m specified by fp, creating sub-maps as
-// needed. m must not be nil. fp is assumed to be valid.
-func setAtPath(m map[string]interface{}, fp FieldPath, val interface{}) {
-	if len(fp) == 1 {
-		m[fp[0]] = val
-	} else {
-		v, ok := m[fp[0]]
-		if !ok {
-			v = map[string]interface{}{}
-			m[fp[0]] = v
-		}
-		// The type assertion below cannot fail, because setAtPath is only called
-		// with either an empty map or one filled by setAtPath itself, and the
-		// set of FieldPaths it is called with has been checked to make sure that
-		// no path is the prefix of any other.
-		setAtPath(v.(map[string]interface{}), fp[1:], val)
-	}
-}
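// For example:
//
//	m := map[string]interface{}{}
//	setAtPath(m, FieldPath{"a", "b"}, 1)
//	setAtPath(m, FieldPath{"a", "c"}, 2)
//	// m == map[string]interface{}{"a": map[string]interface{}{"b": 1, "c": 2}}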
-
-// toServiceFieldPath converts fp to the form required by the Firestore service.
-// It assumes fp has been validated.
-func (fp FieldPath) toServiceFieldPath() string {
-	cs := make([]string, len(fp))
-	for i, c := range fp {
-		cs[i] = toServiceFieldPathComponent(c)
-	}
-	return strings.Join(cs, ".")
-}
-
-func toServiceFieldPaths(fps []FieldPath) []string {
-	var sfps []string
-	for _, fp := range fps {
-		sfps = append(sfps, fp.toServiceFieldPath())
-	}
-	return sfps
-}
-
-// Google SQL syntax for an unquoted field.
-var unquotedFieldRegexp = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$")
-
-// toServiceFieldPathComponent returns a string that represents key and is a valid
-// field path component.
-func toServiceFieldPathComponent(key string) string {
-	if unquotedFieldRegexp.MatchString(key) {
-		return key
-	}
-	var buf bytes.Buffer
-	buf.WriteRune('`')
-	for _, r := range key {
-		if r == '`' || r == '\\' {
-			buf.WriteRune('\\')
-		}
-		buf.WriteRune(r)
-	}
-	buf.WriteRune('`')
-	return buf.String()
-}
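// For example:
//
//	toServiceFieldPathComponent("pop")     // pop (matches unquotedFieldRegexp)
//	toServiceFieldPathComponent("odd.key") // `odd.key`
//	toServiceFieldPathComponent("a`b")     // `a\`b` (backquote escaped)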

+ 0 - 423
vendor/cloud.google.com/go/firestore/from_value.go

@@ -1,423 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-	"github.com/golang/protobuf/ptypes"
-)
-
-func setFromProtoValue(x interface{}, vproto *pb.Value, c *Client) error {
-	v := reflect.ValueOf(x)
-	if v.Kind() != reflect.Ptr || v.IsNil() {
-		return errors.New("firestore: nil or not a pointer")
-	}
-	return setReflectFromProtoValue(v.Elem(), vproto, c)
-}
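// An illustrative sketch; the *Client value c is assumed to exist:
//
//	var s string
//	err := setFromProtoValue(&s, &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "CA"}}, c)
//	// s == "CA" on success; passing s rather than &s returns an error.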
-
-// setReflectFromProtoValue sets v from a Firestore Value.
-// v must be a settable value.
-func setReflectFromProtoValue(v reflect.Value, vproto *pb.Value, c *Client) error {
-	typeErr := func() error {
-		return fmt.Errorf("firestore: cannot set type %s to %s", v.Type(), typeString(vproto))
-	}
-
-	val := vproto.ValueType
-	// A Null value sets anything nullable to nil, and has no effect
-	// on anything else.
-	if _, ok := val.(*pb.Value_NullValue); ok {
-		switch v.Kind() {
-		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-			v.Set(reflect.Zero(v.Type()))
-		}
-		return nil
-	}
-
-	// Handle special types first.
-	switch v.Type() {
-	case typeOfByteSlice:
-		x, ok := val.(*pb.Value_BytesValue)
-		if !ok {
-			return typeErr()
-		}
-		v.SetBytes(x.BytesValue)
-		return nil
-
-	case typeOfGoTime:
-		x, ok := val.(*pb.Value_TimestampValue)
-		if !ok {
-			return typeErr()
-		}
-		t, err := ptypes.Timestamp(x.TimestampValue)
-		if err != nil {
-			return err
-		}
-		v.Set(reflect.ValueOf(t))
-		return nil
-
-	case typeOfLatLng:
-		x, ok := val.(*pb.Value_GeoPointValue)
-		if !ok {
-			return typeErr()
-		}
-		v.Set(reflect.ValueOf(x.GeoPointValue))
-		return nil
-
-	case typeOfDocumentRef:
-		x, ok := val.(*pb.Value_ReferenceValue)
-		if !ok {
-			return typeErr()
-		}
-		dr, err := pathToDoc(x.ReferenceValue, c)
-		if err != nil {
-			return err
-		}
-		v.Set(reflect.ValueOf(dr))
-		return nil
-	}
-
-	switch v.Kind() {
-	case reflect.Bool:
-		x, ok := val.(*pb.Value_BooleanValue)
-		if !ok {
-			return typeErr()
-		}
-		v.SetBool(x.BooleanValue)
-
-	case reflect.String:
-		x, ok := val.(*pb.Value_StringValue)
-		if !ok {
-			return typeErr()
-		}
-		v.SetString(x.StringValue)
-
-	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-		var i int64
-		switch x := val.(type) {
-		case *pb.Value_IntegerValue:
-			i = x.IntegerValue
-		case *pb.Value_DoubleValue:
-			f := x.DoubleValue
-			i = int64(f)
-			if float64(i) != f {
-				return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type())
-			}
-		default:
-			return typeErr()
-		}
-		if v.OverflowInt(i) {
-			return overflowErr(v, i)
-		}
-		v.SetInt(i)
-
-	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
-		var u uint64
-		switch x := val.(type) {
-		case *pb.Value_IntegerValue:
-			u = uint64(x.IntegerValue)
-		case *pb.Value_DoubleValue:
-			f := x.DoubleValue
-			u = uint64(f)
-			if float64(u) != f {
-				return fmt.Errorf("firestore: float %f does not fit into %s", f, v.Type())
-			}
-		default:
-			return typeErr()
-		}
-		if v.OverflowUint(u) {
-			return overflowErr(v, u)
-		}
-		v.SetUint(u)
-
-	case reflect.Float32, reflect.Float64:
-		var f float64
-		switch x := val.(type) {
-		case *pb.Value_DoubleValue:
-			f = x.DoubleValue
-		case *pb.Value_IntegerValue:
-			f = float64(x.IntegerValue)
-			if int64(f) != x.IntegerValue {
-				return overflowErr(v, x.IntegerValue)
-			}
-		default:
-			return typeErr()
-		}
-		if v.OverflowFloat(f) {
-			return overflowErr(v, f)
-		}
-		v.SetFloat(f)
-
-	case reflect.Slice:
-		x, ok := val.(*pb.Value_ArrayValue)
-		if !ok {
-			return typeErr()
-		}
-		vals := x.ArrayValue.Values
-		vlen := v.Len()
-		xlen := len(vals)
-		// Make a slice of the right size, avoiding allocation if possible.
-		switch {
-		case vlen < xlen:
-			v.Set(reflect.MakeSlice(v.Type(), xlen, xlen))
-		case vlen > xlen:
-			v.SetLen(xlen)
-		}
-		return populateRepeated(v, vals, xlen, c)
-
-	case reflect.Array:
-		x, ok := val.(*pb.Value_ArrayValue)
-		if !ok {
-			return typeErr()
-		}
-		vals := x.ArrayValue.Values
-		xlen := len(vals)
-		vlen := v.Len()
-		minlen := vlen
-		// Set extra elements to their zero value.
-		if vlen > xlen {
-			z := reflect.Zero(v.Type().Elem())
-			for i := xlen; i < vlen; i++ {
-				v.Index(i).Set(z)
-			}
-			minlen = xlen
-		}
-		return populateRepeated(v, vals, minlen, c)
-
-	case reflect.Map:
-		x, ok := val.(*pb.Value_MapValue)
-		if !ok {
-			return typeErr()
-		}
-		return populateMap(v, x.MapValue.Fields, c)
-
-	case reflect.Ptr:
-		// If the pointer is nil, set it to a zero value.
-		if v.IsNil() {
-			v.Set(reflect.New(v.Type().Elem()))
-		}
-		return setReflectFromProtoValue(v.Elem(), vproto, c)
-
-	case reflect.Struct:
-		x, ok := val.(*pb.Value_MapValue)
-		if !ok {
-			return typeErr()
-		}
-		return populateStruct(v, x.MapValue.Fields, c)
-
-	case reflect.Interface:
-		if v.NumMethod() == 0 { // empty interface
-			// If v holds a pointer, set the pointer.
-			if !v.IsNil() && v.Elem().Kind() == reflect.Ptr {
-				return setReflectFromProtoValue(v.Elem(), vproto, c)
-			}
-			// Otherwise, create a fresh value.
-			x, err := createFromProtoValue(vproto, c)
-			if err != nil {
-				return err
-			}
-			v.Set(reflect.ValueOf(x))
-			return nil
-		}
-		// Any other kind of interface is an error.
-		fallthrough
-
-	default:
-		return fmt.Errorf("firestore: cannot set type %s", v.Type())
-	}
-	return nil
-}
-
-// populateRepeated sets the first n elements of vr, which must be a slice or
-// array, to the corresponding elements of vals.
-func populateRepeated(vr reflect.Value, vals []*pb.Value, n int, c *Client) error {
-	for i := 0; i < n; i++ {
-		if err := setReflectFromProtoValue(vr.Index(i), vals[i], c); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// populateMap sets the elements of vm, which must be a map, from the
-// corresponding elements of pm.
-//
-// Since a map value is not settable, this function always creates a new
-// element for each corresponding map key. Existing values of vm are
-// overwritten. This happens even if the map value is something like a pointer
-// to a struct, where we could in theory populate the existing struct value
-// instead of discarding it. This behavior matches encoding/json.
-func populateMap(vm reflect.Value, pm map[string]*pb.Value, c *Client) error {
-	t := vm.Type()
-	if t.Key().Kind() != reflect.String {
-		return errors.New("firestore: map key type is not string")
-	}
-	if vm.IsNil() {
-		vm.Set(reflect.MakeMap(t))
-	}
-	et := t.Elem()
-	for k, vproto := range pm {
-		el := reflect.New(et).Elem()
-		if err := setReflectFromProtoValue(el, vproto, c); err != nil {
-			return err
-		}
-		vm.SetMapIndex(reflect.ValueOf(k), el)
-	}
-	return nil
-}
-
-// createMapFromValueMap creates a fresh map and populates it with pm.
-func createMapFromValueMap(pm map[string]*pb.Value, c *Client) (map[string]interface{}, error) {
-	m := map[string]interface{}{}
-	for k, pv := range pm {
-		v, err := createFromProtoValue(pv, c)
-		if err != nil {
-			return nil, err
-		}
-		m[k] = v
-	}
-	return m, nil
-}
-
-// populateStruct sets the fields of vs, which must be a struct, from
-// the matching elements of pm.
-func populateStruct(vs reflect.Value, pm map[string]*pb.Value, c *Client) error {
-	fields, err := fieldCache.Fields(vs.Type())
-	if err != nil {
-		return err
-	}
-	for k, vproto := range pm {
-		f := fields.Match(k)
-		if f == nil {
-			continue
-		}
-		if err := setReflectFromProtoValue(vs.FieldByIndex(f.Index), vproto, c); err != nil {
-			return fmt.Errorf("%s.%s: %v", vs.Type(), f.Name, err)
-		}
-	}
-	return nil
-}
-
-func createFromProtoValue(vproto *pb.Value, c *Client) (interface{}, error) {
-	switch v := vproto.ValueType.(type) {
-	case *pb.Value_NullValue:
-		return nil, nil
-	case *pb.Value_BooleanValue:
-		return v.BooleanValue, nil
-	case *pb.Value_IntegerValue:
-		return v.IntegerValue, nil
-	case *pb.Value_DoubleValue:
-		return v.DoubleValue, nil
-	case *pb.Value_TimestampValue:
-		return ptypes.Timestamp(v.TimestampValue)
-	case *pb.Value_StringValue:
-		return v.StringValue, nil
-	case *pb.Value_BytesValue:
-		return v.BytesValue, nil
-	case *pb.Value_ReferenceValue:
-		return pathToDoc(v.ReferenceValue, c)
-	case *pb.Value_GeoPointValue:
-		return v.GeoPointValue, nil
-
-	case *pb.Value_ArrayValue:
-		vals := v.ArrayValue.Values
-		ret := make([]interface{}, len(vals))
-		for i, v := range vals {
-			r, err := createFromProtoValue(v, c)
-			if err != nil {
-				return nil, err
-			}
-			ret[i] = r
-		}
-		return ret, nil
-
-	case *pb.Value_MapValue:
-		fields := v.MapValue.Fields
-		ret := make(map[string]interface{}, len(fields))
-		for k, v := range fields {
-			r, err := createFromProtoValue(v, c)
-			if err != nil {
-				return nil, err
-			}
-			ret[k] = r
-		}
-		return ret, nil
-
-	default:
-		return nil, fmt.Errorf("firestore: unknown value type %T", v)
-	}
-}
-
-// Convert a document path to a DocumentRef.
-func pathToDoc(docPath string, c *Client) (*DocumentRef, error) {
-	projID, dbID, docIDs, err := parseDocumentPath(docPath)
-	if err != nil {
-		return nil, err
-	}
-	parentResourceName := fmt.Sprintf("projects/%s/databases/%s", projID, dbID)
-	_, doc := c.idsToRef(docIDs, parentResourceName)
-	return doc, nil
-}
-
-// A document path should be of the form "projects/P/databases/D/documents/coll1/doc1/coll2/doc2/...".
-func parseDocumentPath(path string) (projectID, databaseID string, docPath []string, err error) {
-	parts := strings.Split(path, "/")
-	if len(parts) < 6 || parts[0] != "projects" || parts[2] != "databases" || parts[4] != "documents" {
-		return "", "", nil, fmt.Errorf("firestore: malformed document path %q", path)
-	}
-	docp := parts[5:]
-	if len(docp)%2 != 0 {
-		return "", "", nil, fmt.Errorf("firestore: path %q refers to collection, not document", path)
-	}
-	return parts[1], parts[3], docp, nil
-}
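// For example:
//
//	proj, db, docp, err := parseDocumentPath("projects/P/databases/D/documents/States/NewYork")
//	// proj == "P", db == "D", docp == []string{"States", "NewYork"}, err == nil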
-
-func typeString(vproto *pb.Value) string {
-	switch vproto.ValueType.(type) {
-	case *pb.Value_NullValue:
-		return "null"
-	case *pb.Value_BooleanValue:
-		return "bool"
-	case *pb.Value_IntegerValue:
-		return "int"
-	case *pb.Value_DoubleValue:
-		return "float"
-	case *pb.Value_TimestampValue:
-		return "timestamp"
-	case *pb.Value_StringValue:
-		return "string"
-	case *pb.Value_BytesValue:
-		return "bytes"
-	case *pb.Value_ReferenceValue:
-		return "reference"
-	case *pb.Value_GeoPointValue:
-		return "GeoPoint"
-	case *pb.Value_MapValue:
-		return "map"
-	case *pb.Value_ArrayValue:
-		return "array"
-	default:
-		return "<unknown Value type>"
-	}
-}
-
-func overflowErr(v reflect.Value, x interface{}) error {
-	return fmt.Errorf("firestore: value %v overflows type %s", x, v.Type())
-}

+ 0 - 3
vendor/cloud.google.com/go/firestore/genproto/README.md

@@ -1,3 +0,0 @@
-The contents of this directory are copied from 
-github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/genproto.
-

+ 0 - 662
vendor/cloud.google.com/go/firestore/genproto/test.pb.go

@@ -1,662 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: test.proto
-// DO NOT EDIT!
-
-/*
-Package tests is a generated protocol buffer package.
-
-It is generated from these files:
-	test.proto
-
-It has these top-level messages:
-	Test
-	GetTest
-	CreateTest
-	SetTest
-	UpdateTest
-	UpdatePathsTest
-	DeleteTest
-	SetOption
-	FieldPath
-*/
-package tests
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_firestore_v1beta14 "google.golang.org/genproto/googleapis/firestore/v1beta1"
-import google_firestore_v1beta1 "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// A Test describes a single client method call and its expected result.
-type Test struct {
-	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	// Types that are valid to be assigned to Test:
-	//	*Test_Get
-	//	*Test_Create
-	//	*Test_Set
-	//	*Test_Update
-	//	*Test_UpdatePaths
-	//	*Test_Delete
-	Test isTest_Test `protobuf_oneof:"test"`
-}
-
-func (m *Test) Reset()                    { *m = Test{} }
-func (m *Test) String() string            { return proto.CompactTextString(m) }
-func (*Test) ProtoMessage()               {}
-func (*Test) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-type isTest_Test interface {
-	isTest_Test()
-}
-
-type Test_Get struct {
-	Get *GetTest `protobuf:"bytes,2,opt,name=get,oneof"`
-}
-type Test_Create struct {
-	Create *CreateTest `protobuf:"bytes,3,opt,name=create,oneof"`
-}
-type Test_Set struct {
-	Set *SetTest `protobuf:"bytes,4,opt,name=set,oneof"`
-}
-type Test_Update struct {
-	Update *UpdateTest `protobuf:"bytes,5,opt,name=update,oneof"`
-}
-type Test_UpdatePaths struct {
-	UpdatePaths *UpdatePathsTest `protobuf:"bytes,6,opt,name=update_paths,json=updatePaths,oneof"`
-}
-type Test_Delete struct {
-	Delete *DeleteTest `protobuf:"bytes,7,opt,name=delete,oneof"`
-}
-
-func (*Test_Get) isTest_Test()         {}
-func (*Test_Create) isTest_Test()      {}
-func (*Test_Set) isTest_Test()         {}
-func (*Test_Update) isTest_Test()      {}
-func (*Test_UpdatePaths) isTest_Test() {}
-func (*Test_Delete) isTest_Test()      {}
-
-func (m *Test) GetTest() isTest_Test {
-	if m != nil {
-		return m.Test
-	}
-	return nil
-}
-
-func (m *Test) GetDescription() string {
-	if m != nil {
-		return m.Description
-	}
-	return ""
-}
-
-func (m *Test) GetGet() *GetTest {
-	if x, ok := m.GetTest().(*Test_Get); ok {
-		return x.Get
-	}
-	return nil
-}
-
-func (m *Test) GetCreate() *CreateTest {
-	if x, ok := m.GetTest().(*Test_Create); ok {
-		return x.Create
-	}
-	return nil
-}
-
-func (m *Test) GetSet() *SetTest {
-	if x, ok := m.GetTest().(*Test_Set); ok {
-		return x.Set
-	}
-	return nil
-}
-
-func (m *Test) GetUpdate() *UpdateTest {
-	if x, ok := m.GetTest().(*Test_Update); ok {
-		return x.Update
-	}
-	return nil
-}
-
-func (m *Test) GetUpdatePaths() *UpdatePathsTest {
-	if x, ok := m.GetTest().(*Test_UpdatePaths); ok {
-		return x.UpdatePaths
-	}
-	return nil
-}
-
-func (m *Test) GetDelete() *DeleteTest {
-	if x, ok := m.GetTest().(*Test_Delete); ok {
-		return x.Delete
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Test) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Test_OneofMarshaler, _Test_OneofUnmarshaler, _Test_OneofSizer, []interface{}{
-		(*Test_Get)(nil),
-		(*Test_Create)(nil),
-		(*Test_Set)(nil),
-		(*Test_Update)(nil),
-		(*Test_UpdatePaths)(nil),
-		(*Test_Delete)(nil),
-	}
-}
-
-func _Test_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Test)
-	// test
-	switch x := m.Test.(type) {
-	case *Test_Get:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Get); err != nil {
-			return err
-		}
-	case *Test_Create:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Create); err != nil {
-			return err
-		}
-	case *Test_Set:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Set); err != nil {
-			return err
-		}
-	case *Test_Update:
-		b.EncodeVarint(5<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Update); err != nil {
-			return err
-		}
-	case *Test_UpdatePaths:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.UpdatePaths); err != nil {
-			return err
-		}
-	case *Test_Delete:
-		b.EncodeVarint(7<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Delete); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("Test.Test has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Test_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Test)
-	switch tag {
-	case 2: // test.get
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(GetTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_Get{msg}
-		return true, err
-	case 3: // test.create
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(CreateTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_Create{msg}
-		return true, err
-	case 4: // test.set
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(SetTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_Set{msg}
-		return true, err
-	case 5: // test.update
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(UpdateTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_Update{msg}
-		return true, err
-	case 6: // test.update_paths
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(UpdatePathsTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_UpdatePaths{msg}
-		return true, err
-	case 7: // test.delete
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(DeleteTest)
-		err := b.DecodeMessage(msg)
-		m.Test = &Test_Delete{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Test_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Test)
-	// test
-	switch x := m.Test.(type) {
-	case *Test_Get:
-		s := proto.Size(x.Get)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Test_Create:
-		s := proto.Size(x.Create)
-		n += proto.SizeVarint(3<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Test_Set:
-		s := proto.Size(x.Set)
-		n += proto.SizeVarint(4<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Test_Update:
-		s := proto.Size(x.Update)
-		n += proto.SizeVarint(5<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Test_UpdatePaths:
-		s := proto.Size(x.UpdatePaths)
-		n += proto.SizeVarint(6<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *Test_Delete:
-		s := proto.Size(x.Delete)
-		n += proto.SizeVarint(7<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-// Call to the DocumentRef.Get method.
-type GetTest struct {
-	// The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d"
-	DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	// The request that the call should send to the Firestore service.
-	Request *google_firestore_v1beta14.GetDocumentRequest `protobuf:"bytes,2,opt,name=request" json:"request,omitempty"`
-}
-
-func (m *GetTest) Reset()                    { *m = GetTest{} }
-func (m *GetTest) String() string            { return proto.CompactTextString(m) }
-func (*GetTest) ProtoMessage()               {}
-func (*GetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *GetTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *GetTest) GetRequest() *google_firestore_v1beta14.GetDocumentRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-// Call to DocumentRef.Create.
-type CreateTest struct {
-	// The path of the doc, e.g. "projects/projectID/databases/(default)/documents/C/d"
-	DocRefPath string `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	// The data passed to Create, as JSON. The strings "Delete" and "ServerTimestamp"
-	// denote the two special sentinel values. Values that could be interpreted as integers
-	// (i.e. digit strings) should be treated as integers.
-	JsonData string `protobuf:"bytes,2,opt,name=json_data,json=jsonData" json:"json_data,omitempty"`
-	// The request that the call should generate.
-	Request *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"`
-	// If true, the call should result in an error without generating a request.
-	// If this is true, request should not be set.
-	IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
-}
-
-func (m *CreateTest) Reset()                    { *m = CreateTest{} }
-func (m *CreateTest) String() string            { return proto.CompactTextString(m) }
-func (*CreateTest) ProtoMessage()               {}
-func (*CreateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *CreateTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *CreateTest) GetJsonData() string {
-	if m != nil {
-		return m.JsonData
-	}
-	return ""
-}
-
-func (m *CreateTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *CreateTest) GetIsError() bool {
-	if m != nil {
-		return m.IsError
-	}
-	return false
-}
-
-// A call to DocumentRef.Set.
-type SetTest struct {
-	DocRefPath string                                   `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	Option     *SetOption                               `protobuf:"bytes,2,opt,name=option" json:"option,omitempty"`
-	JsonData   string                                   `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"`
-	Request    *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"`
-	IsError    bool                                     `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"`
-}
-
-func (m *SetTest) Reset()                    { *m = SetTest{} }
-func (m *SetTest) String() string            { return proto.CompactTextString(m) }
-func (*SetTest) ProtoMessage()               {}
-func (*SetTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *SetTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *SetTest) GetOption() *SetOption {
-	if m != nil {
-		return m.Option
-	}
-	return nil
-}
-
-func (m *SetTest) GetJsonData() string {
-	if m != nil {
-		return m.JsonData
-	}
-	return ""
-}
-
-func (m *SetTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *SetTest) GetIsError() bool {
-	if m != nil {
-		return m.IsError
-	}
-	return false
-}
-
-// A call to the form of DocumentRef.Update that represents the data as a map
-// or dictionary.
-type UpdateTest struct {
-	DocRefPath   string                                   `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	Precondition *google_firestore_v1beta1.Precondition   `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
-	JsonData     string                                   `protobuf:"bytes,3,opt,name=json_data,json=jsonData" json:"json_data,omitempty"`
-	Request      *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,4,opt,name=request" json:"request,omitempty"`
-	IsError      bool                                     `protobuf:"varint,5,opt,name=is_error,json=isError" json:"is_error,omitempty"`
-}
-
-func (m *UpdateTest) Reset()                    { *m = UpdateTest{} }
-func (m *UpdateTest) String() string            { return proto.CompactTextString(m) }
-func (*UpdateTest) ProtoMessage()               {}
-func (*UpdateTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *UpdateTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *UpdateTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
-	if m != nil {
-		return m.Precondition
-	}
-	return nil
-}
-
-func (m *UpdateTest) GetJsonData() string {
-	if m != nil {
-		return m.JsonData
-	}
-	return ""
-}
-
-func (m *UpdateTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *UpdateTest) GetIsError() bool {
-	if m != nil {
-		return m.IsError
-	}
-	return false
-}
-
-// A call to the form of DocumentRef.Update that represents the data as a list
-// of field paths and their values.
-type UpdatePathsTest struct {
-	DocRefPath   string                                 `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	Precondition *google_firestore_v1beta1.Precondition `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
-	// parallel sequences: field_paths[i] corresponds to json_values[i]
-	FieldPaths []*FieldPath                             `protobuf:"bytes,3,rep,name=field_paths,json=fieldPaths" json:"field_paths,omitempty"`
-	JsonValues []string                                 `protobuf:"bytes,4,rep,name=json_values,json=jsonValues" json:"json_values,omitempty"`
-	Request    *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,5,opt,name=request" json:"request,omitempty"`
-	IsError    bool                                     `protobuf:"varint,6,opt,name=is_error,json=isError" json:"is_error,omitempty"`
-}
-
-func (m *UpdatePathsTest) Reset()                    { *m = UpdatePathsTest{} }
-func (m *UpdatePathsTest) String() string            { return proto.CompactTextString(m) }
-func (*UpdatePathsTest) ProtoMessage()               {}
-func (*UpdatePathsTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *UpdatePathsTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *UpdatePathsTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
-	if m != nil {
-		return m.Precondition
-	}
-	return nil
-}
-
-func (m *UpdatePathsTest) GetFieldPaths() []*FieldPath {
-	if m != nil {
-		return m.FieldPaths
-	}
-	return nil
-}
-
-func (m *UpdatePathsTest) GetJsonValues() []string {
-	if m != nil {
-		return m.JsonValues
-	}
-	return nil
-}
-
-func (m *UpdatePathsTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *UpdatePathsTest) GetIsError() bool {
-	if m != nil {
-		return m.IsError
-	}
-	return false
-}
-
-// A call to DocumentRef.Delete
-type DeleteTest struct {
-	DocRefPath   string                                   `protobuf:"bytes,1,opt,name=doc_ref_path,json=docRefPath" json:"doc_ref_path,omitempty"`
-	Precondition *google_firestore_v1beta1.Precondition   `protobuf:"bytes,2,opt,name=precondition" json:"precondition,omitempty"`
-	Request      *google_firestore_v1beta14.CommitRequest `protobuf:"bytes,3,opt,name=request" json:"request,omitempty"`
-	IsError      bool                                     `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
-}
-
-func (m *DeleteTest) Reset()                    { *m = DeleteTest{} }
-func (m *DeleteTest) String() string            { return proto.CompactTextString(m) }
-func (*DeleteTest) ProtoMessage()               {}
-func (*DeleteTest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *DeleteTest) GetDocRefPath() string {
-	if m != nil {
-		return m.DocRefPath
-	}
-	return ""
-}
-
-func (m *DeleteTest) GetPrecondition() *google_firestore_v1beta1.Precondition {
-	if m != nil {
-		return m.Precondition
-	}
-	return nil
-}
-
-func (m *DeleteTest) GetRequest() *google_firestore_v1beta14.CommitRequest {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *DeleteTest) GetIsError() bool {
-	if m != nil {
-		return m.IsError
-	}
-	return false
-}
-
-// An option to the DocumentRef.Set call.
-type SetOption struct {
-	All    bool         `protobuf:"varint,1,opt,name=all" json:"all,omitempty"`
-	Fields []*FieldPath `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"`
-}
-
-func (m *SetOption) Reset()                    { *m = SetOption{} }
-func (m *SetOption) String() string            { return proto.CompactTextString(m) }
-func (*SetOption) ProtoMessage()               {}
-func (*SetOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *SetOption) GetAll() bool {
-	if m != nil {
-		return m.All
-	}
-	return false
-}
-
-func (m *SetOption) GetFields() []*FieldPath {
-	if m != nil {
-		return m.Fields
-	}
-	return nil
-}
-
-// A field path.
-type FieldPath struct {
-	Field []string `protobuf:"bytes,1,rep,name=field" json:"field,omitempty"`
-}
-
-func (m *FieldPath) Reset()                    { *m = FieldPath{} }
-func (m *FieldPath) String() string            { return proto.CompactTextString(m) }
-func (*FieldPath) ProtoMessage()               {}
-func (*FieldPath) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *FieldPath) GetField() []string {
-	if m != nil {
-		return m.Field
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterType((*Test)(nil), "tests.Test")
-	proto.RegisterType((*GetTest)(nil), "tests.GetTest")
-	proto.RegisterType((*CreateTest)(nil), "tests.CreateTest")
-	proto.RegisterType((*SetTest)(nil), "tests.SetTest")
-	proto.RegisterType((*UpdateTest)(nil), "tests.UpdateTest")
-	proto.RegisterType((*UpdatePathsTest)(nil), "tests.UpdatePathsTest")
-	proto.RegisterType((*DeleteTest)(nil), "tests.DeleteTest")
-	proto.RegisterType((*SetOption)(nil), "tests.SetOption")
-	proto.RegisterType((*FieldPath)(nil), "tests.FieldPath")
-}
-
-func init() { proto.RegisterFile("test.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 559 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
-	0x10, 0xc5, 0x71, 0xe2, 0x24, 0x93, 0x08, 0xca, 0x0a, 0x21, 0x53, 0x0e, 0x18, 0x4b, 0x40, 0x24,
-	0x50, 0xaa, 0xc0, 0x91, 0x13, 0x34, 0xb4, 0x88, 0x0b, 0xd5, 0x16, 0xb8, 0x46, 0xae, 0x3d, 0x09,
-	0x46, 0x8e, 0xd7, 0xec, 0xae, 0xfb, 0x9f, 0x38, 0x72, 0xe7, 0x47, 0x70, 0xe4, 0x8f, 0x70, 0x47,
-	0xfb, 0xe1, 0xda, 0x06, 0x59, 0xca, 0xa1, 0xb4, 0xb7, 0xf5, 0x9b, 0x37, 0x1f, 0xef, 0xcd, 0x6e,
-	0x02, 0x20, 0x51, 0xc8, 0x79, 0xc1, 0x99, 0x64, 0x64, 0xa0, 0xce, 0x62, 0x7f, 0xb6, 0x61, 0x6c,
-	0x93, 0xe1, 0xc1, 0x3a, 0xe5, 0x28, 0x24, 0xe3, 0x78, 0x70, 0xbe, 0x38, 0x43, 0x19, 0x2d, 0x6a,
-	0xc4, 0x24, 0xec, 0x3f, 0xea, 0x64, 0xc6, 0x6c, 0xbb, 0x65, 0xb9, 0xa1, 0x85, 0x3f, 0x7a, 0xd0,
-	0xff, 0x80, 0x42, 0x92, 0x00, 0x26, 0x09, 0x8a, 0x98, 0xa7, 0x85, 0x4c, 0x59, 0xee, 0x3b, 0x81,
-	0x33, 0x1b, 0xd3, 0x26, 0x44, 0x42, 0x70, 0x37, 0x28, 0xfd, 0x5e, 0xe0, 0xcc, 0x26, 0xcf, 0x6f,
-	0xce, 0xf5, 0x40, 0xf3, 0x63, 0x94, 0x2a, 0xfd, 0xed, 0x0d, 0xaa, 0x82, 0xe4, 0x29, 0x78, 0x31,
-	0xc7, 0x48, 0xa2, 0xef, 0x6a, 0xda, 0x6d, 0x4b, 0x3b, 0xd4, 0xa0, 0x65, 0x5a, 0x8a, 0x2a, 0x28,
-	0x50, 0xfa, 0xfd, 0x56, 0xc1, 0xd3, 0xba, 0xa0, 0x30, 0x05, 0xcb, 0x22, 0x51, 0x05, 0x07, 0xad,
-	0x82, 0x1f, 0x35, 0x58, 0x15, 0x34, 0x14, 0xf2, 0x12, 0xa6, 0xe6, 0xb4, 0x2a, 0x22, 0xf9, 0x59,
-	0xf8, 0x9e, 0x4e, 0xb9, 0xdb, 0x4a, 0x39, 0x51, 0x11, 0x9b, 0x37, 0x29, 0x6b, 0x48, 0x75, 0x4a,
-	0x30, 0x43, 0x89, 0xfe, 0xb0, 0xd5, 0x69, 0xa9, 0xc1, 0xaa, 0x93, 0xa1, 0xbc, 0xf6, 0xa0, 0xaf,
-	0xa2, 0xa1, 0x80, 0xa1, 0x75, 0x80, 0x04, 0x30, 0x4d, 0x58, 0xbc, 0xe2, 0xb8, 0xd6, 0xdd, 0xad,
-	0x83, 0x90, 0xb0, 0x98, 0xe2, 0x5a, 0xb5, 0x20, 0x47, 0x30, 0xe4, 0xf8, 0xb5, 0x44, 0x51, 0x99,
-	0xf8, 0x6c, 0x6e, 0x96, 0x34, 0xaf, 0x97, 0x67, 0x97, 0xa4, 0x7c, 0x5d, 0xb2, 0xb8, 0xdc, 0x62,
-	0x2e, 0xa9, 0xc9, 0xa1, 0x55, 0x72, 0xf8, 0xcd, 0x01, 0xa8, 0x0d, 0xdd, 0xa1, 0xf1, 0x7d, 0x18,
-	0x7f, 0x11, 0x2c, 0x5f, 0x25, 0x91, 0x8c, 0x74, 0xeb, 0x31, 0x1d, 0x29, 0x60, 0x19, 0xc9, 0x88,
-	0xbc, 0xaa, 0xa7, 0x32, 0x3b, 0x7b, 0xd2, 0x3d, 0xd5, 0x21, 0xdb, 0x6e, 0xd3, 0x7f, 0x06, 0x22,
-	0xf7, 0x60, 0x94, 0x8a, 0x15, 0x72, 0xce, 0xb8, 0xde, 0xe6, 0x88, 0x0e, 0x53, 0xf1, 0x46, 0x7d,
-	0x86, 0x3f, 0x1d, 0x18, 0x9e, 0xee, 0xec, 0xd0, 0x0c, 0x3c, 0x66, 0xee, 0x9f, 0x31, 0x68, 0xaf,
-	0xbe, 0x14, 0xef, 0x35, 0x4e, 0x6d, 0xbc, 0x2d, 0xc9, 0xed, 0x96, 0xd4, 0xbf, 0x04, 0x49, 0x83,
-	0xb6, 0xa4, 0xdf, 0x0e, 0x40, 0x7d, 0xfd, 0x76, 0x50, 0xf5, 0x0e, 0xa6, 0x05, 0xc7, 0x98, 0xe5,
-	0x49, 0xda, 0xd0, 0xf6, 0xb8, 0x7b, 0xa6, 0x93, 0x06, 0x9b, 0xb6, 0x72, 0xaf, 0x53, 0xf7, 0xf7,
-	0x1e, 0xdc, 0xfa, 0xeb, 0x0d, 0x5d, 0xb1, 0xf8, 0x05, 0x4c, 0xd6, 0x29, 0x66, 0x89, 0x7d, 0xde,
-	0x6e, 0xe0, 0x36, 0xee, 0xc8, 0x91, 0x8a, 0xa8, 0x96, 0x14, 0xd6, 0xd5, 0x51, 0x90, 0x07, 0x30,
-	0xd1, 0x7e, 0x9d, 0x47, 0x59, 0x89, 0xc2, 0xef, 0x07, 0xae, 0x9a, 0x4f, 0x41, 0x9f, 0x34, 0xd2,
-	0xf4, 0x6c, 0x70, 0x09, 0x9e, 0x79, 0x6d, 0xcf, 0x7e, 0x39, 0x00, 0xf5, 0x0f, 0xc8, 0x15, 0xdb,
-	0xf5, 0x7f, 0x5f, 0xf6, 0x31, 0x8c, 0x2f, 0x9e, 0x25, 0xd9, 0x03, 0x37, 0xca, 0x32, 0xad, 0x67,
-	0x44, 0xd5, 0x51, 0x3d, 0x65, 0xbd, 0x06, 0xe1, 0xf7, 0x3a, 0xd6, 0x64, 0xe3, 0xe1, 0x43, 0x18,
-	0x5f, 0x80, 0xe4, 0x0e, 0x0c, 0x34, 0xec, 0x3b, 0x7a, 0x53, 0xe6, 0xe3, 0xcc, 0xd3, 0x7f, 0x56,
-	0x2f, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x8e, 0x38, 0xdd, 0x12, 0x07, 0x00, 0x00,
-}

+ 0 - 16
vendor/cloud.google.com/go/firestore/internal/Makefile

@@ -1,16 +0,0 @@
-# Build doc.go from template and snippets.
-
-SHELL=/bin/bash
-
-../doc.go: build doc-snippets.go doc.template snipdoc.awk
-	@tmp=$$(mktemp) && \
-	awk -f snipdoc.awk doc-snippets.go doc.template > $$tmp && \
-	chmod +w $@ && \
-	mv $$tmp $@ && \
-	chmod -w $@
-	@echo "wrote $@"
-
-.PHONY: build
-
-build:
-	go build doc-snippets.go

+ 0 - 161
vendor/cloud.google.com/go/firestore/internal/doc-snippets.go

@@ -1,161 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"fmt"
-
-	firestore "cloud.google.com/go/firestore"
-
-	"golang.org/x/net/context"
-	"google.golang.org/api/iterator"
-)
-
-const ELLIPSIS = 0
-
-//[ structDef
-type State struct {
-	Capital    string  `firestore:"capital"`
-	Population float64 `firestore:"pop"` // in millions
-}
-
-//]
-
-func f1() {
-	//[ NewClient
-	ctx := context.Background()
-	client, err := firestore.NewClient(ctx, "projectID")
-	if err != nil {
-		// TODO: Handle error.
-	}
-	//]
-	//[ refs
-	states := client.Collection("States")
-	ny := states.Doc("NewYork")
-	// Or, in a single call:
-	ny = client.Doc("States/NewYork")
-	//]
-	//[ docref.Get
-	docsnap, err := ny.Get(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	dataMap := docsnap.Data()
-	fmt.Println(dataMap)
-	//]
-	//[ DataTo
-	var nyData State
-	if err := docsnap.DataTo(&nyData); err != nil {
-		// TODO: Handle error.
-	}
-	//]
-	//[ GetAll
-	docsnaps, err := client.GetAll(ctx, []*firestore.DocumentRef{
-		states.Doc("Wisconsin"), states.Doc("Ohio"),
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-	for _, ds := range docsnaps {
-		_ = ds // TODO: Use ds.
-	}
-	//[ docref.Create
-	wr, err := ny.Create(ctx, State{
-		Capital:    "Albany",
-		Population: 19.8,
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-	fmt.Println(wr)
-	//]
-	//[ docref.Set
-	ca := states.Doc("California")
-	_, err = ca.Set(ctx, State{
-		Capital:    "Sacramento",
-		Population: 39.14,
-	})
-	//]
-
-	//[ docref.Update
-	_, err = ca.Update(ctx, []firestore.Update{{Path: "capital", Value: "Sacramento"}})
-	//]
-
-	//[ docref.Delete
-	_, err = ny.Delete(ctx)
-	//]
-
-	//[ LUT-precond
-	docsnap, err = ca.Get(ctx)
-	if err != nil {
-		// TODO: Handle error.
-	}
-	_, err = ca.Update(ctx,
-		[]firestore.Update{{Path: "capital", Value: "Sacramento"}},
-		firestore.LastUpdateTime(docsnap.UpdateTime))
-	//]
-
-	//[ WriteBatch
-	writeResults, err := client.Batch().
-		Create(ny, State{Capital: "Albany"}).
-		Update(ca, []firestore.Update{{Path: "capital", Value: "Sacramento"}}).
-		Delete(client.Doc("States/WestDakota")).
-		Commit(ctx)
-	//]
-	_ = writeResults
-
-	//[ Query
-	q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc)
-	//]
-	//[ Documents
-	iter := q.Documents(ctx)
-	for {
-		doc, err := iter.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			// TODO: Handle error.
-		}
-		fmt.Println(doc.Data())
-	}
-	//]
-
-	//[ CollQuery
-	iter = client.Collection("States").Documents(ctx)
-	//]
-}
-
-func txn() {
-	var ctx context.Context
-	var client *firestore.Client
-	//[ Transaction
-	ny := client.Doc("States/NewYork")
-	err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
-		doc, err := tx.Get(ny) // tx.Get, NOT ny.Get!
-		if err != nil {
-			return err
-		}
-		pop, err := doc.DataAt("pop")
-		if err != nil {
-			return err
-		}
-		return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
-	})
-	if err != nil {
-		// TODO: Handle error.
-	}
-	//]
-}

+ 0 - 142
vendor/cloud.google.com/go/firestore/internal/doc.template

@@ -1,142 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT doc.go. Modify internal/doc.template, then run make -C internal.
-
-/*
-Package firestore provides a client for reading and writing to a Cloud Firestore
-database.
-
-See https://cloud.google.com/firestore/docs for an introduction
-to Cloud Firestore and additional help on using the Firestore API.
-
-Creating a Client
-
-To start working with this package, create a client with a project ID:
-
-[NewClient]
-
-CollectionRefs and DocumentRefs
-
-In Firestore, documents are sets of key-value pairs, and collections are groups of
-documents. A Firestore database consists of a hierarchy of alternating collections
-and documents, referred to by slash-separated paths like
-"States/California/Cities/SanFrancisco".
-
-This client is built around references to collections and documents. CollectionRefs
-and DocumentRefs are lightweight values that refer to the corresponding database
-entities. Creating a ref does not involve any network traffic.
-
-[refs]
-
-Reading
-
-Use DocumentRef.Get to read a document. The result is a DocumentSnapshot.
-Call its Data method to obtain the entire document contents as a map.
-
-[docref.Get]
-
-You can also obtain a single field with DataAt, or extract the data into a struct
-with DataTo. With the type definition
-
-[structDef]
-
-we can extract the document's data into a value of type State:
-
-[DataTo]
-
-Note that this client supports struct tags beginning with "firestore:" that work like
-the tags of the encoding/json package, letting you rename fields, ignore them, or
-omit their values when empty.
-
-To retrieve multiple documents from their references in a single call, use
-Client.GetAll.
-
-[GetAll]
-
-Writing
-
-For writing individual documents, use the methods on DocumentReference.
-Create creates a new document.
-
-[docref.Create]
-
-The first return value is a WriteResult, which contains the time
-at which the document was updated.
-
-Create fails if the document exists. Another method, Set, either replaces an existing
-document or creates a new one.
-
-[docref.Set]
-
-To update some fields of an existing document, use Update. It takes a list of
-paths to update and their corresponding values.
-
-[docref.Update]
-
-Use DocumentRef.Delete to delete a document.
-
-[docref.Delete]
-
-Preconditions
-
-You can condition Deletes or Updates on when a document was last changed. Specify
-these preconditions as an option to a Delete or Update method. The check and the
-write happen atomically with a single RPC.
-
-[LUT-precond]
-
-Here we update a doc only if it hasn't changed since we read it.
-You could also do this with a transaction.
-
-To perform multiple writes at once, use a WriteBatch. Its methods chain
-for convenience.
-
-WriteBatch.Commit sends the collected writes to the server, where they happen
-atomically.
-
-[WriteBatch]
-
-Queries
-
-You can use queries to select documents from a collection. Begin with the collection, and
-build up a query using Select, Where and other methods of Query.
-
-[Query]
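
A sketch of [Query], reusing the states CollectionRef; "pop" is the illustrative field from the State type above:

    q := states.Where("pop", ">", 10).OrderBy("pop", firestore.Desc).Limit(10)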
-
-Call the Query's Documents method to get an iterator, and use it like
-the other Google Cloud Client iterators.
-
-[Documents]
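
A sketch of [Documents], using the Done sentinel from google.golang.org/api/iterator as the other Cloud client iterators do:

    iter := q.Documents(ctx)
    for {
        doc, err := iter.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(doc.Data())
    }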
-
-To get all the documents in a collection, you can use the collection itself
-as a query.
-
-[CollQuery]
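
A sketch of [CollQuery]:

    iter := client.Collection("States").Documents(ctx)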
-
-Transactions
-
-Use a transaction to execute reads and writes atomically. All reads must happen
-before any writes. Transaction creation, commit, rollback and retry are handled for
-you by the Client.RunTransaction method; just provide a function and use the
-read and write methods of the Transaction passed to it.
-
-[Transaction]
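
A sketch of [Transaction]: increment a field inside RunTransaction, doing every read through the Transaction value rather than the ref itself:

    ny := client.Doc("States/NewYork")
    err := client.RunTransaction(ctx, func(ctx context.Context, tx *firestore.Transaction) error {
        doc, err := tx.Get(ny) // tx.Get, not ny.Get: reads must go through the transaction
        if err != nil {
            return err
        }
        pop, err := doc.DataAt("pop")
        if err != nil {
            return err
        }
        return tx.Update(ny, []firestore.Update{{Path: "pop", Value: pop.(float64) + 0.2}})
    })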
-
-Authentication
-
-See examples of authorization and authentication at
-https://godoc.org/cloud.google.com/go#pkg-examples.
-*/
-package firestore

+ 0 - 116
vendor/cloud.google.com/go/firestore/internal/snipdoc.awk

@@ -1,116 +0,0 @@
-# Copyright 2017 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# snipdoc merges code snippets from Go source files into a template to
-# produce another go file (typically doc.go).
-#
-# Call with one or more .go files and a template file.
-#
-#    awk -f snipdoc.awk foo.go bar.go doc.template
-#
-# In the Go files, start a snippet with
-#    //[ NAME
-# and end it with
-#    //]
-#
-# In the template, write
-#    [NAME]
-# on a line by itself to insert the snippet NAME on that line.
-#
-# The following transformations are made to the Go code:
-# - Trailing blank lines are removed.
-# - `ELLIPSIS` and `_ = ELLIPSIS` are replaced by `...`
-
-
-/^[ \t]*\/\/\[/ { # start snippet in Go file
-  if (inGo()) {
-    if ($2 == "") {
-      die("missing snippet name")
-    }
-    curSnip = $2
-    next
-  }
-}
-
-/^[ \t]*\/\/]/ {  # end snippet in Go file
-  if (inGo()) {
-    if (curSnip != "") {
-      # Remove all trailing newlines.
-      gsub(/[\t\n]+$/, "", snips[curSnip])
-      curSnip = ""
-      next
-    } else {
-      die("//] without corresponding //[")
-    }
-  }
-}
-
-ENDFILE {
-  if (curSnip != "") {
-    die("unclosed snippet: " curSnip)
-  }
-}
-
-/^\[.*\]$/ { # Snippet marker in template file.
-  if (inTemplate()) {
-    name = substr($1, 2, length($1)-2)
-    if (snips[name] == "") {
-      die("no snippet named " name)
-    }
-    printf("%s\n", snips[name])
-    afterSnip = 1
-    next
-  }
-}
-
-# Matches every line.
-{
-  if (curSnip != "") {
-    # If the first line in the snip has no indent, add the indent.
-    if (snips[curSnip] == "") {
-      if (index($0, "\t") == 1) {
-        extraIndent = ""
-      } else {
-        extraIndent = "\t"
-      }
-    }
-
-    line = $0
-    # Replace ELLIPSIS.
-    gsub(/_ = ELLIPSIS/, "...", line)
-    gsub(/ELLIPSIS/, "...", line)
-
-    snips[curSnip] = snips[curSnip] extraIndent line "\n"
-  } else if (inTemplate()) {
-    afterSnip = 0
-    # Copy to output.
-    print
-  }
-}
-
-
-
-function inTemplate() {
-  return match(FILENAME, /\.template$/)
-}
-
-function inGo() {
-  return match(FILENAME, /\.go$/)
-}
-
-
-function die(msg) {
-  printf("%s:%d: %s\n", FILENAME, FNR, msg) > "/dev/stderr"
-  exit 1
-}

+ 0 - 177
vendor/cloud.google.com/go/firestore/options.go

@@ -1,177 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-	"github.com/golang/protobuf/ptypes"
-)
-
-// A Precondition modifies a Firestore update or delete operation.
-type Precondition interface {
-	// Returns the corresponding Precondition proto.
-	preconditionProto() (*pb.Precondition, error)
-}
-
-// Exists is a Precondition that checks for the existence of a resource before
-// writing to it. If the check fails, the write does not occur.
-var Exists Precondition
-
-func init() {
-	// Initialize here so godoc doesn't show the internal value.
-	Exists = exists(true)
-}
-
-type exists bool
-
-func (e exists) preconditionProto() (*pb.Precondition, error) {
-	return &pb.Precondition{
-		ConditionType: &pb.Precondition_Exists{bool(e)},
-	}, nil
-}
-
-func (e exists) String() string {
-	if e {
-		return "Exists"
-	} else {
-		return "DoesNotExist"
-	}
-}
-
-// LastUpdateTime returns a Precondition that checks that a resource must exist and
-// must have last been updated at the given time. If the check fails, the write
-// does not occur.
-func LastUpdateTime(t time.Time) Precondition { return lastUpdateTime(t) }
-
-type lastUpdateTime time.Time
-
-func (u lastUpdateTime) preconditionProto() (*pb.Precondition, error) {
-	ts, err := ptypes.TimestampProto(time.Time(u))
-	if err != nil {
-		return nil, err
-	}
-	return &pb.Precondition{
-		ConditionType: &pb.Precondition_UpdateTime{ts},
-	}, nil
-}
-
-func (u lastUpdateTime) String() string { return fmt.Sprintf("LastUpdateTime(%s)", time.Time(u)) }
-
-func processPreconditionsForDelete(preconds []Precondition) (*pb.Precondition, error) {
-	// At most one option permitted.
-	switch len(preconds) {
-	case 0:
-		return nil, nil
-	case 1:
-		return preconds[0].preconditionProto()
-	default:
-		return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
-	}
-}
-
-func processPreconditionsForUpdate(preconds []Precondition) (*pb.Precondition, error) {
-	// At most one option permitted, and it cannot be Exists.
-	switch len(preconds) {
-	case 0:
-		// If the user doesn't provide any options, default to Exists(true).
-		return exists(true).preconditionProto()
-	case 1:
-		if _, ok := preconds[0].(exists); ok {
-			return nil, errors.New("Cannot use Exists with Update")
-		}
-		return preconds[0].preconditionProto()
-	default:
-		return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
-	}
-}
-
-func processPreconditionsForVerify(preconds []Precondition) (*pb.Precondition, error) {
-	// At most one option permitted.
-	switch len(preconds) {
-	case 0:
-		return nil, nil
-	case 1:
-		return preconds[0].preconditionProto()
-	default:
-		return nil, fmt.Errorf("firestore: conflicting preconditions: %+v", preconds)
-	}
-}
-
-// A SetOption modifies a Firestore set operation.
-type SetOption interface {
-	fieldPaths() (fps []FieldPath, all bool, err error)
-}
-
-// MergeAll is a SetOption that causes all the field paths given in the data argument
-// to Set to be overwritten. It is not supported for struct data.
-var MergeAll SetOption = merge{all: true}
-
-// Merge returns a SetOption that causes only the given field paths to be
-// overwritten. Other fields on the existing document will be untouched. It is an
-// error if a provided field path does not refer to a value in the data passed to
-// Set.
-func Merge(fps ...FieldPath) SetOption {
-	for _, fp := range fps {
-		if err := fp.validate(); err != nil {
-			return merge{err: err}
-		}
-	}
-	return merge{paths: fps}
-}
-
-type merge struct {
-	all   bool
-	paths []FieldPath
-	err   error
-}
-
-func (m merge) String() string {
-	if m.err != nil {
-		return fmt.Sprintf("<Merge error: %v>", m.err)
-	}
-	if m.all {
-		return "MergeAll"
-	}
-	return fmt.Sprintf("Merge(%+v)", m.paths)
-}
-
-func (m merge) fieldPaths() (fps []FieldPath, all bool, err error) {
-	if m.err != nil {
-		return nil, false, m.err
-	}
-	if err := checkNoDupOrPrefix(m.paths); err != nil {
-		return nil, false, err
-	}
-	if m.all {
-		return nil, true, nil
-	}
-	return m.paths, false, nil
-}
-
-func processSetOptions(opts []SetOption) (fps []FieldPath, all bool, err error) {
-	switch len(opts) {
-	case 0:
-		return nil, false, nil
-	case 1:
-		return opts[0].fieldPaths()
-	default:
-		return nil, false, fmt.Errorf("conflicting options: %+v", opts)
-	}
-}

+ 0 - 463
vendor/cloud.google.com/go/firestore/query.go

@@ -1,463 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package firestore
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"reflect"
-
-	"golang.org/x/net/context"
-
-	pb "google.golang.org/genproto/googleapis/firestore/v1beta1"
-
-	"github.com/golang/protobuf/ptypes/wrappers"
-	"google.golang.org/api/iterator"
-)
-
-// Query represents a Firestore query.
-//
-// Query values are immutable. Each Query method creates
-// a new Query; it does not modify the old.
-type Query struct {
-	c                      *Client
-	parentPath             string // path of the collection's parent
-	collectionID           string
-	selection              []FieldPath
-	filters                []filter
-	orders                 []order
-	offset                 int32
-	limit                  *wrappers.Int32Value
-	startVals, endVals     []interface{}
-	startBefore, endBefore bool
-	err                    error
-}
-
-// DocumentID is the special field name representing the ID of a document
-// in queries.
-const DocumentID = "__name__"
-
-// Select returns a new Query that specifies the paths
-// to return from the result documents.
-// Each path argument can be a single field or a dot-separated sequence of
-// fields, and must not contain any of the runes "~*/[]".
-func (q Query) Select(paths ...string) Query {
-	var fps []FieldPath
-	for _, s := range paths {
-		fp, err := parseDotSeparatedString(s)
-		if err != nil {
-			q.err = err
-			return q
-		}
-		fps = append(fps, fp)
-	}
-	if fps == nil {
-		q.selection = []FieldPath{{DocumentID}}
-	} else {
-		q.selection = fps
-	}
-	return q
-}
-
-// SelectPaths returns a new Query that specifies the field paths
-// to return from the result documents.
-func (q Query) SelectPaths(fieldPaths ...FieldPath) Query {
-	q.selection = fieldPaths
-	return q
-}
-
-// Where returns a new Query that filters the set of results.
-// A Query can have multiple filters.
-// The path argument can be a single field or a dot-separated sequence of
-// fields, and must not contain any of the runes "~*/[]".
-// The op argument must be one of "==", "<", "<=", ">" or ">=".
-func (q Query) Where(path, op string, value interface{}) Query {
-	fp, err := parseDotSeparatedString(path)
-	if err != nil {
-		q.err = err
-		return q
-	}
-	q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
-	return q
-}
-
-// WherePath returns a new Query that filters the set of results.
-// A Query can have multiple filters.
-// The op argument must be one of "==", "<", "<=", ">" or ">=".
-func (q Query) WherePath(fp FieldPath, op string, value interface{}) Query {
-	q.filters = append(append([]filter(nil), q.filters...), filter{fp, op, value})
-	return q
-}
-
-// Direction is the sort direction for result ordering.
-type Direction int32
-
-const (
-	// Asc sorts results from smallest to largest.
-	Asc Direction = Direction(pb.StructuredQuery_ASCENDING)
-
-	// Desc sorts results from largest to smallest.
-	Desc Direction = Direction(pb.StructuredQuery_DESCENDING)
-)
-
-// OrderBy returns a new Query that specifies the order in which results are
-// returned. A Query can have multiple OrderBy/OrderByPath specifications. OrderBy
-// appends the specification to the list of existing ones.
-//
-// The path argument can be a single field or a dot-separated sequence of
-// fields, and must not contain any of the runes "~*/[]".
-//
-// To order by document name, use the special field path DocumentID.
-func (q Query) OrderBy(path string, dir Direction) Query {
-	fp, err := parseDotSeparatedString(path)
-	if err != nil {
-		q.err = err
-		return q
-	}
-	q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
-	return q
-}
-
-// OrderByPath returns a new Query that specifies the order in which results are
-// returned. A Query can have multiple OrderBy/OrderByPath specifications.
-// OrderByPath appends the specification to the list of existing ones.
-func (q Query) OrderByPath(fp FieldPath, dir Direction) Query {
-	q.orders = append(append([]order(nil), q.orders...), order{fp, dir})
-	return q
-}
-
-// Offset returns a new Query that specifies the number of initial results to skip.
-// It must not be negative.
-func (q Query) Offset(n int) Query {
-	q.offset = trunc32(n)
-	return q
-}
-
-// Limit returns a new Query that specifies the maximum number of results to return.
-// It must not be negative.
-func (q Query) Limit(n int) Query {
-	q.limit = &wrappers.Int32Value{trunc32(n)}
-	return q
-}
-
-// StartAt returns a new Query that specifies that results should start at
-// the document with the given field values. The field path corresponding to
-// each value is taken from the corresponding OrderBy call. For example, in
-//   q.OrderBy("X", Asc).OrderBy("Y", Desc).StartAt(1, 2)
-// results will begin at the first document where X = 1 and Y = 2.
-//
-// If an OrderBy call uses the special DocumentID field path, the corresponding value
-// should be the document ID relative to the query's collection. For example, to
-// start at the document "NewYork" in the "States" collection, write
-//
-//   client.Collection("States").OrderBy(DocumentID, firestore.Asc).StartAt("NewYork")
-//
-// Calling StartAt overrides a previous call to StartAt or StartAfter.
-func (q Query) StartAt(fieldValues ...interface{}) Query {
-	q.startVals, q.startBefore = fieldValues, true
-	return q
-}
-
-// StartAfter returns a new Query that specifies that results should start just after
-// the document with the given field values. See Query.StartAt for more information.
-//
-// Calling StartAfter overrides a previous call to StartAt or StartAfter.
-func (q Query) StartAfter(fieldValues ...interface{}) Query {
-	q.startVals, q.startBefore = fieldValues, false
-	return q
-}
-
-// EndAt returns a new Query that specifies that results should end at the
-// document with the given field values. See Query.StartAt for more information.
-//
-// Calling EndAt overrides a previous call to EndAt or EndBefore.
-func (q Query) EndAt(fieldValues ...interface{}) Query {
-	q.endVals, q.endBefore = fieldValues, false
-	return q
-}
-
-// EndBefore returns a new Query that specifies that results should end just before
-// the document with the given field values. See Query.StartAt for more information.
-//
-// Calling EndBefore overrides a previous call to EndAt or EndBefore.
-func (q Query) EndBefore(fieldValues ...interface{}) Query {
-	q.endVals, q.endBefore = fieldValues, true
-	return q
-}
-
-func (q Query) query() *Query { return &q }
-
-func (q Query) toProto() (*pb.StructuredQuery, error) {
-	if q.err != nil {
-		return nil, q.err
-	}
-	if q.collectionID == "" {
-		return nil, errors.New("firestore: query created without CollectionRef")
-	}
-	p := &pb.StructuredQuery{
-		From:   []*pb.StructuredQuery_CollectionSelector{{CollectionId: q.collectionID}},
-		Offset: q.offset,
-		Limit:  q.limit,
-	}
-	if len(q.selection) > 0 {
-		p.Select = &pb.StructuredQuery_Projection{}
-		for _, fp := range q.selection {
-			if err := fp.validate(); err != nil {
-				return nil, err
-			}
-			p.Select.Fields = append(p.Select.Fields, fref(fp))
-		}
-	}
-	// If there is only one filter, use it directly. Otherwise, construct
-	// a CompositeFilter.
-	if len(q.filters) == 1 {
-		pf, err := q.filters[0].toProto()
-		if err != nil {
-			return nil, err
-		}
-		p.Where = pf
-	} else if len(q.filters) > 1 {
-		cf := &pb.StructuredQuery_CompositeFilter{
-			Op: pb.StructuredQuery_CompositeFilter_AND,
-		}
-		p.Where = &pb.StructuredQuery_Filter{
-			FilterType: &pb.StructuredQuery_Filter_CompositeFilter{cf},
-		}
-		for _, f := range q.filters {
-			pf, err := f.toProto()
-			if err != nil {
-				return nil, err
-			}
-			cf.Filters = append(cf.Filters, pf)
-		}
-	}
-	for _, ord := range q.orders {
-		po, err := ord.toProto()
-		if err != nil {
-			return nil, err
-		}
-		p.OrderBy = append(p.OrderBy, po)
-	}
-	// StartAt and EndAt must have values that correspond exactly to the explicit order-by fields.
-	if len(q.startVals) != 0 {
-		vals, err := q.toPositionValues(q.startVals)
-		if err != nil {
-			return nil, err
-		}
-		p.StartAt = &pb.Cursor{Values: vals, Before: q.startBefore}
-	}
-	if len(q.endVals) != 0 {
-		vals, err := q.toPositionValues(q.endVals)
-		if err != nil {
-			return nil, err
-		}
-		p.EndAt = &pb.Cursor{Values: vals, Before: q.endBefore}
-	}
-	return p, nil
-}
-
-// toPositionValues converts the field values to protos.
-func (q *Query) toPositionValues(fieldValues []interface{}) ([]*pb.Value, error) {
-	if len(fieldValues) != len(q.orders) {
-		return nil, errors.New("firestore: number of field values in StartAt/StartAfter/EndAt/EndBefore does not match number of OrderBy fields")
-	}
-	vals := make([]*pb.Value, len(fieldValues))
-	var err error
-	for i, ord := range q.orders {
-		fval := fieldValues[i]
-		if len(ord.fieldPath) == 1 && ord.fieldPath[0] == DocumentID {
-			docID, ok := fval.(string)
-			if !ok {
-				return nil, fmt.Errorf("firestore: expected doc ID for DocumentID field, got %T", fval)
-			}
-			vals[i] = &pb.Value{&pb.Value_ReferenceValue{q.parentPath + "/documents/" + q.collectionID + "/" + docID}}
-		} else {
-			var sawTransform bool
-			vals[i], sawTransform, err = toProtoValue(reflect.ValueOf(fval))
-			if err != nil {
-				return nil, err
-			}
-			if sawTransform {
-				return nil, errors.New("firestore: ServerTimestamp disallowed in query value")
-			}
-		}
-	}
-	return vals, nil
-}
-
-type filter struct {
-	fieldPath FieldPath
-	op        string
-	value     interface{}
-}
-
-func (f filter) toProto() (*pb.StructuredQuery_Filter, error) {
-	if err := f.fieldPath.validate(); err != nil {
-		return nil, err
-	}
-	var op pb.StructuredQuery_FieldFilter_Operator
-	switch f.op {
-	case "<":
-		op = pb.StructuredQuery_FieldFilter_LESS_THAN
-	case "<=":
-		op = pb.StructuredQuery_FieldFilter_LESS_THAN_OR_EQUAL
-	case ">":
-		op = pb.StructuredQuery_FieldFilter_GREATER_THAN
-	case ">=":
-		op = pb.StructuredQuery_FieldFilter_GREATER_THAN_OR_EQUAL
-	case "==":
-		op = pb.StructuredQuery_FieldFilter_EQUAL
-	default:
-		return nil, fmt.Errorf("firestore: invalid operator %q", f.op)
-	}
-	val, sawTransform, err := toProtoValue(reflect.ValueOf(f.value))
-	if err != nil {
-		return nil, err
-	}
-	if sawTransform {
-		return nil, errors.New("firestore: ServerTimestamp disallowed in query value")
-	}
-	return &pb.StructuredQuery_Filter{
-		FilterType: &pb.StructuredQuery_Filter_FieldFilter{
-			&pb.StructuredQuery_FieldFilter{
-				Field: fref(f.fieldPath),
-				Op:    op,
-				Value: val,
-			},
-		},
-	}, nil
-}
-
-type order struct {
-	fieldPath FieldPath
-	dir       Direction
-}
-
-func (r order) toProto() (*pb.StructuredQuery_Order, error) {
-	if err := r.fieldPath.validate(); err != nil {
-		return nil, err
-	}
-	return &pb.StructuredQuery_Order{
-		Field:     fref(r.fieldPath),
-		Direction: pb.StructuredQuery_Direction(r.dir),
-	}, nil
-}
-
-func fref(fp FieldPath) *pb.StructuredQuery_FieldReference {
-	return &pb.StructuredQuery_FieldReference{fp.toServiceFieldPath()}
-}
-
-func trunc32(i int) int32 {
-	if i > math.MaxInt32 {
-		i = math.MaxInt32
-	}
-	return int32(i)
-}
-
-// Documents returns an iterator over the query's resulting documents.
-func (q Query) Documents(ctx context.Context) *DocumentIterator {
-	return &DocumentIterator{
-		ctx: withResourceHeader(ctx, q.c.path()),
-		q:   &q,
-		err: checkTransaction(ctx),
-	}
-}
-
-// DocumentIterator is an iterator over documents returned by a query.
-type DocumentIterator struct {
-	ctx          context.Context
-	q            *Query
-	tid          []byte // transaction ID, if any
-	streamClient pb.Firestore_RunQueryClient
-	err          error
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there
-// are no more results. Once Next returns Done, all subsequent calls will return
-// Done.
-func (it *DocumentIterator) Next() (*DocumentSnapshot, error) {
-	if it.err != nil {
-		return nil, it.err
-	}
-	client := it.q.c
-	if it.streamClient == nil {
-		sq, err := it.q.toProto()
-		if err != nil {
-			it.err = err
-			return nil, err
-		}
-		req := &pb.RunQueryRequest{
-			Parent:    it.q.parentPath,
-			QueryType: &pb.RunQueryRequest_StructuredQuery{sq},
-		}
-		if it.tid != nil {
-			req.ConsistencySelector = &pb.RunQueryRequest_Transaction{it.tid}
-		}
-		it.streamClient, it.err = client.c.RunQuery(it.ctx, req)
-		if it.err != nil {
-			return nil, it.err
-		}
-	}
-	var res *pb.RunQueryResponse
-	var err error
-	for {
-		res, err = it.streamClient.Recv()
-		if err == io.EOF {
-			err = iterator.Done
-		}
-		if err != nil {
-			it.err = err
-			return nil, it.err
-		}
-		if res.Document != nil {
-			break
-		}
-		// No document => partial progress; keep receiving.
-	}
-	docRef, err := pathToDoc(res.Document.Name, client)
-	if err != nil {
-		it.err = err
-		return nil, err
-	}
-	doc, err := newDocumentSnapshot(docRef, res.Document, client)
-	if err != nil {
-		it.err = err
-		return nil, err
-	}
-	return doc, nil
-}
-
-// GetAll returns all the documents remaining from the iterator.
-func (it *DocumentIterator) GetAll() ([]*DocumentSnapshot, error) {
-	var docs []*DocumentSnapshot
-	for {
-		doc, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		docs = append(docs, doc)
-	}
-	return docs, nil
-}
-
-// TODO(jba): Does the iterator need a Stop or Close method? I don't think so--
-// I don't think the client can terminate a streaming receive except perhaps
-// by cancelling the context, and the user can do that themselves if they wish.
-// Find out for sure.

+ 0 - 11
vendor/cloud.google.com/go/firestore/testdata/Makefile

@@ -1,11 +0,0 @@
-# Copy textproto files in this directory from the source of truth.
-
-SRC=$(GOPATH)/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata
-
-.PHONY: refresh
-
-refresh:
-	-rm *.textproto
-	cp $(SRC)/*.textproto .
-	openssl dgst -sha1 $(SRC)/tests.binprotos > VERSION
-

+ 0 - 1
vendor/cloud.google.com/go/firestore/testdata/VERSION

@@ -1 +0,0 @@
-SHA1(/usr/local/google/home/jba/go/src/github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/testdata/tests.binprotos)= b0fbaaac8664945cb4f5667da092a6f9ededc57e

+ 0 - 27
vendor/cloud.google.com/go/firestore/testdata/create-basic.textproto

@@ -1,27 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# A simple call, resulting in a single update operation.
-
-description: "create: basic"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": 1}"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      update: <
-        name: "projects/projectID/databases/(default)/documents/C/d"
-        fields: <
-          key: "a"
-          value: <
-            integer_value: 1
-          >
-        >
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

+ 0 - 61
vendor/cloud.google.com/go/firestore/testdata/create-complex.textproto

@@ -1,61 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# A call to a write method with complicated input data.
-
-description: "create: complex"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": [1, 2.5], \"b\": {\"c\": [\"three\", {\"d\": true}]}}"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      update: <
-        name: "projects/projectID/databases/(default)/documents/C/d"
-        fields: <
-          key: "a"
-          value: <
-            array_value: <
-              values: <
-                integer_value: 1
-              >
-              values: <
-                double_value: 2.5
-              >
-            >
-          >
-        >
-        fields: <
-          key: "b"
-          value: <
-            map_value: <
-              fields: <
-                key: "c"
-                value: <
-                  array_value: <
-                    values: <
-                      string_value: "three"
-                    >
-                    values: <
-                      map_value: <
-                        fields: <
-                          key: "d"
-                          value: <
-                            boolean_value: true
-                          >
-                        >
-                      >
-                    >
-                  >
-                >
-              >
-            >
-          >
-        >
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

+ 0 - 13
vendor/cloud.google.com/go/firestore/testdata/create-del-noarray-nested.textproto

@@ -1,13 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# The Delete sentinel must be the value of a field. Deletes are implemented by
-# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not
-# support array indexing.
-
-description: "create: Delete cannot be anywhere inside an array value"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": [1, {\"b\": \"Delete\"}]}"
-  is_error: true
->

+ 0 - 13
vendor/cloud.google.com/go/firestore/testdata/create-del-noarray.textproto

@@ -1,13 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# The Delete sentinel must be the value of a field. Deletes are implemented by
-# turning the path to the Delete sentinel into a FieldPath, and FieldPaths do not
-# support array indexing.
-
-description: "create: Delete cannot be in an array value"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": [1, 2, \"Delete\"]}"
-  is_error: true
->

+ 0 - 20
vendor/cloud.google.com/go/firestore/testdata/create-empty.textproto

@@ -1,20 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-
-description: "create: creating or setting an empty map"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{}"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      update: <
-        name: "projects/projectID/databases/(default)/documents/C/d"
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

+ 0 - 11
vendor/cloud.google.com/go/firestore/testdata/create-nodel.textproto

@@ -1,11 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# The Delete sentinel cannot be used in Create, or in Set without a Merge option.
-
-description: "create: Delete cannot appear in data"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": 1, \"b\": \"Delete\"}"
-  is_error: true
->

+ 0 - 40
vendor/cloud.google.com/go/firestore/testdata/create-nosplit.textproto

@@ -1,40 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# Create and Set treat their map keys literally. They do not split on dots.
-
-description: "create: don\342\200\231t split on dots"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{ \"a.b\": { \"c.d\": 1 }, \"e\": 2 }"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      update: <
-        name: "projects/projectID/databases/(default)/documents/C/d"
-        fields: <
-          key: "a.b"
-          value: <
-            map_value: <
-              fields: <
-                key: "c.d"
-                value: <
-                  integer_value: 1
-                >
-              >
-            >
-          >
-        >
-        fields: <
-          key: "e"
-          value: <
-            integer_value: 2
-          >
-        >
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

+ 0 - 41
vendor/cloud.google.com/go/firestore/testdata/create-special-chars.textproto

@@ -1,41 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# Create and Set treat their map keys literally. They do not escape special
-# characters.
-
-description: "create: non-alpha characters in map keys"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{ \"*\": { \".\": 1 }, \"~\": 2 }"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      update: <
-        name: "projects/projectID/databases/(default)/documents/C/d"
-        fields: <
-          key: "*"
-          value: <
-            map_value: <
-              fields: <
-                key: "."
-                value: <
-                  integer_value: 1
-                >
-              >
-            >
-          >
-        >
-        fields: <
-          key: "~"
-          value: <
-            integer_value: 2
-          >
-        >
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

+ 0 - 26
vendor/cloud.google.com/go/firestore/testdata/create-st-alone.textproto

@@ -1,26 +0,0 @@
-# DO NOT MODIFY. This file was generated by
-# github.com/GoogleCloudPlatform/google-cloud-common/testing/firestore/cmd/generate-firestore-tests/generate-firestore-tests.go.
-
-# If the only values in the input are ServerTimestamps, then no update operation
-# should be produced.
-
-description: "create: ServerTimestamp alone"
-create: <
-  doc_ref_path: "projects/projectID/databases/(default)/documents/C/d"
-  json_data: "{\"a\": \"ServerTimestamp\"}"
-  request: <
-    database: "projects/projectID/databases/(default)"
-    writes: <
-      transform: <
-        document: "projects/projectID/databases/(default)/documents/C/d"
-        field_transforms: <
-          field_path: "a"
-          set_to_server_value: REQUEST_TIME
-        >
-      >
-      current_document: <
-        exists: false
-      >
-    >
-  >
->

Some files were not shown because too many files changed in this diff