Browse Source

Merge branch 'new_macaron'

Torkel Ödegaard 10 years ago
parent
commit
6da3af5e89
100 changed files with 9405 additions and 3839 deletions
  1. 41 17
      Godeps/Godeps.json
  2. 0 94
      Godeps/_workspace/src/github.com/Unknwon/macaron/README.md
  3. 0 370
      Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go
  4. 0 81
      Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go
  5. 0 65
      Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go
  6. 0 4
      Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md
  7. 0 1
      Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey
  8. 0 174
      Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go
  9. 0 67
      Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go
  10. 0 218
      Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go
  11. 0 74
      Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go
  12. 0 581
      Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go
  13. 0 188
      Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go
  14. 0 69
      Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go
  15. 0 199
      Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go
  16. 0 246
      Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go
  17. 0 421
      Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go
  18. 0 112
      Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go
  19. 14 0
      Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml
  20. 0 0
      Godeps/_workspace/src/github.com/go-macaron/binding/LICENSE
  21. 20 0
      Godeps/_workspace/src/github.com/go-macaron/binding/README.md
  22. 181 136
      Godeps/_workspace/src/github.com/go-macaron/binding/binding.go
  23. 3 2
      Godeps/_workspace/src/github.com/go-macaron/binding/errors.go
  24. 14 0
      Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml
  25. 0 0
      Godeps/_workspace/src/github.com/go-macaron/gzip/LICENSE
  26. 20 0
      Godeps/_workspace/src/github.com/go-macaron/gzip/README.md
  27. 118 0
      Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go
  28. 14 0
      Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml
  29. 191 0
      Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE
  30. 11 0
      Godeps/_workspace/src/github.com/go-macaron/inject/README.md
  31. 15 0
      Godeps/_workspace/src/github.com/go-macaron/inject/inject.go
  32. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/.gitignore
  33. 14 0
      Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml
  34. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/LICENSE
  35. 20 0
      Godeps/_workspace/src/github.com/go-macaron/session/README.md
  36. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/couchbase/couchbase.go
  37. 19 1
      Godeps/_workspace/src/github.com/go-macaron/session/file.go
  38. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.go
  39. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.goconvey
  40. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.go
  41. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.goconvey
  42. 217 212
      Godeps/_workspace/src/github.com/go-macaron/session/memory.go
  43. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.go
  44. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.goconvey
  45. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.go
  46. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.goconvey
  47. 2 2
      Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.go
  48. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.goconvey
  49. 27 19
      Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.go
  50. 0 0
      Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.goconvey
  51. 3 5
      Godeps/_workspace/src/github.com/go-macaron/session/session.go
  52. 12 1
      Godeps/_workspace/src/github.com/go-macaron/session/utils.go
  53. 27 0
      Godeps/_workspace/src/github.com/klauspost/compress/LICENSE
  54. 32 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go
  55. 39 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go
  56. 212 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s
  57. 34 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go
  58. 1293 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go
  59. 78 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go
  60. 265 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go
  61. 690 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go
  62. 363 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go
  63. 846 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go
  64. 48 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go
  65. 97 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go
  66. 105 0
      Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go
  67. 342 0
      Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go
  68. 274 0
      Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go
  69. 24 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore
  70. 7 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml
  71. 22 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE
  72. 145 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/README.md
  73. 1022 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go
  74. 40 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s
  75. 40 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s
  76. 17 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go
  77. 23 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go
  78. 3 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go
  79. 476 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go
  80. 6 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md
  81. 987 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go
  82. 40 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s
  83. 40 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s
  84. 17 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go
  85. 23 0
      Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go
  86. 24 0
      Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore
  87. 11 0
      Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml
  88. 28 0
      Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE
  89. 84 0
      Godeps/_workspace/src/github.com/klauspost/crc32/README.md
  90. 182 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go
  91. 62 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go
  92. 237 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s
  93. 39 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go
  94. 67 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s
  95. 28 0
      Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go
  96. 0 21
      Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md
  97. 0 57
      Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go
  98. 0 115
      Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go
  99. 0 162
      Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go
  100. 0 115
      Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go

+ 41 - 17
Godeps/Godeps.json

@@ -14,10 +14,6 @@
 			"ImportPath": "github.com/Unknwon/com",
 			"Rev": "d9bcf409c8a368d06c9b347705c381e7c12d54df"
 		},
-		{
-			"ImportPath": "github.com/Unknwon/macaron",
-			"Rev": "93de4f3fad97bf246b838f828e2348f46f21f20a"
-		},
 		{
 			"ImportPath": "github.com/aws/aws-sdk-go/aws",
 			"Comment": "v1.0.0",
@@ -68,6 +64,11 @@
 			"Comment": "v1.0.0",
 			"Rev": "abb928e07c4108683d6b4d0b6ca08fe6bc0eee5f"
 		},
+		{
+			"ImportPath": "github.com/bradfitz/gomemcache/memcache",
+			"Comment": "release.r60-40-g72a6864",
+			"Rev": "72a68649ba712ee7c4b5b4a943a626bcd7d90eb8"
+		},
 		{
 			"ImportPath": "github.com/davecgh/go-spew/spew",
 			"Rev": "2df174808ee097f90d259e432cc04442cf60be21"
@@ -82,6 +83,22 @@
 			"Comment": "v1-19-g83e6542",
 			"Rev": "83e65426fd1c06626e88aa8a085e5bfed0208e29"
 		},
+		{
+			"ImportPath": "github.com/go-macaron/binding",
+			"Rev": "2502aaf4bce3a4e6451b4610847bfb8dffdb6266"
+		},
+		{
+			"ImportPath": "github.com/go-macaron/gzip",
+			"Rev": "4938e9be6b279d8426cb1c89a6bcf7af70b0c21d"
+		},
+		{
+			"ImportPath": "github.com/go-macaron/inject",
+			"Rev": "c5ab7bf3a307593cd44cb272d1a5beea473dd072"
+		},
+		{
+			"ImportPath": "github.com/go-macaron/session",
+			"Rev": "66031fcb37a0fff002a1f028eb0b3a815c78306b"
+		},
 		{
 			"ImportPath": "github.com/go-sql-driver/mysql",
 			"Comment": "v1.2-26-g9543750",
@@ -110,17 +127,25 @@
 			"Rev": "f1ac7f4f24f50328e6bc838ca4437d1612a0243c"
 		},
 		{
-			"ImportPath": "github.com/lib/pq",
-			"Comment": "go1.0-cutoff-13-g19eeca3",
-			"Rev": "19eeca3e30d2577b1761db471ec130810e67f532"
+			"ImportPath": "github.com/klauspost/compress/flate",
+			"Rev": "7b02889a2005228347aef0e76beeaee564d82f8c"
+		},
+		{
+			"ImportPath": "github.com/klauspost/compress/gzip",
+			"Rev": "7b02889a2005228347aef0e76beeaee564d82f8c"
+		},
+		{
+			"ImportPath": "github.com/klauspost/cpuid",
+			"Rev": "349c675778172472f5e8f3a3e0fe187e302e5a10"
 		},
 		{
-			"ImportPath": "github.com/macaron-contrib/binding",
-			"Rev": "0fbe4b9707e6eb556ef843e5471592f55ce0a5e7"
+			"ImportPath": "github.com/klauspost/crc32",
+			"Rev": "6834731faf32e62a2dd809d99fb24d1e4ae5a92d"
 		},
 		{
-			"ImportPath": "github.com/macaron-contrib/session",
-			"Rev": "31e841d95c7302b9ac456c830ea2d6dfcef4f84a"
+			"ImportPath": "github.com/lib/pq",
+			"Comment": "go1.0-cutoff-13-g19eeca3",
+			"Rev": "19eeca3e30d2577b1761db471ec130810e67f532"
 		},
 		{
 			"ImportPath": "github.com/mattn/go-sqlite3",
@@ -162,15 +187,14 @@
 			"Comment": "v0-16-g1772191",
 			"Rev": "177219109c97e7920c933e21c9b25f874357b237"
 		},
+		{
+			"ImportPath": "gopkg.in/macaron.v1",
+			"Rev": "1c6dd87797ae9319b4658cbd48d1d0420b279fd5"
+		},
 		{
 			"ImportPath": "gopkg.in/redis.v2",
 			"Comment": "v2.3.2",
 			"Rev": "e6179049628164864e6e84e973cfb56335748dea"
-		},
-    {
-      "ImportPath": "github.com/bradfitz/gomemcache/memcache",
-      "Comment": "release.r60-40-g72a6864",
-      "Rev": "72a68649ba712ee7c4b5b4a943a626bcd7d90eb8"
-    }
+		}
 	]
 }

+ 0 - 94
Godeps/_workspace/src/github.com/Unknwon/macaron/README.md

@@ -1,94 +0,0 @@
-Macaron [![Build Status](https://drone.io/github.com/Unknwon/macaron/status.png)](https://drone.io/github.com/Unknwon/macaron/latest) [![](http://gocover.io/_badge/github.com/Unknwon/macaron)](http://gocover.io/github.com/Unknwon/macaron)
-=======================
-
-![Macaron Logo](https://raw.githubusercontent.com/Unknwon/macaron/master/macaronlogo.png)
-
-Package macaron is a high productive and modular design web framework in Go.
-
-##### Current version: 0.5.4
-
-## Getting Started
-
-To install Macaron:
-
-	go get github.com/Unknwon/macaron
-
-The very basic usage of Macaron:
-
-```go
-package main
-
-import "github.com/Unknwon/macaron"
-
-func main() {
-	m := macaron.Classic()
-	m.Get("/", func() string {
-		return "Hello world!"
-	})
-	m.Run()
-}
-```
-
-## Features
-
-- Powerful routing with suburl.
-- Flexible routes combinations.
-- Unlimited nested group routers.
-- Directly integrate with existing services.
-- Dynamically change template files at runtime.
-- Allow to use in-memory template and static files.
-- Easy to plugin/unplugin features with modular design.
-- Handy dependency injection powered by [inject](https://github.com/codegangsta/inject).
-- Better router layer and less reflection make faster speed.
-
-## Middlewares
-
-Middlewares allow you easily plugin/unplugin features for your Macaron applications.
-
-There are already many [middlewares](https://github.com/macaron-contrib) to simplify your work:
-
-- gzip - Gzip compression to all requests
-- render - Go template engine
-- static - Serves static files
-- [binding](https://github.com/macaron-contrib/binding) - Request data binding and validation
-- [i18n](https://github.com/macaron-contrib/i18n) - Internationalization and Localization
-- [cache](https://github.com/macaron-contrib/cache) - Cache manager
-- [session](https://github.com/macaron-contrib/session) - Session manager
-- [csrf](https://github.com/macaron-contrib/csrf) - Generates and validates csrf tokens
-- [captcha](https://github.com/macaron-contrib/captcha) - Captcha service
-- [pongo2](https://github.com/macaron-contrib/pongo2) - Pongo2 template engine support
-- [sockets](https://github.com/macaron-contrib/sockets) - WebSockets channels binding
-- [bindata](https://github.com/macaron-contrib/bindata) - Embed binary data as static and template files
-- [toolbox](https://github.com/macaron-contrib/toolbox) - Health check, pprof, profile and statistic services
-- [oauth2](https://github.com/macaron-contrib/oauth2) - OAuth 2.0 backend
-- [switcher](https://github.com/macaron-contrib/switcher) - Multiple-site support
-- [method](https://github.com/macaron-contrib/method) - HTTP method override
-- [permissions2](https://github.com/xyproto/permissions2) - Cookies, users and permissions
-- [renders](https://github.com/macaron-contrib/renders) - Beego-like render engine(Macaron has built-in template engine, this is another option)
-
-## Use Cases
-
-- [Gogs](https://github.com/gogits/gogs): Go Git Service
-- [Gogs Web](https://github.com/gogits/gogsweb): Gogs official website
-- [Go Walker](https://gowalker.org): Go online API documentation
-- [Switch](https://github.com/gpmgo/switch): Gopm registry
-- [YouGam](http://yougam.com): Online Forum
-- [Car Girl](http://qcnl.gzsy.com/): Online campaign
-- [Critical Stack Intel](https://intel.criticalstack.com/): A 100% free intel marketplace from Critical Stack, Inc.
-
-## Getting Help
-
-- [API Reference](https://gowalker.org/github.com/Unknwon/macaron)
-- [Documentation](http://macaron.gogs.io)
-- [FAQs](http://macaron.gogs.io/docs/faqs)
-- [![Join the chat at https://gitter.im/Unknwon/macaron](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Unknwon/macaron?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-## Credits
-
-- Basic design of [Martini](https://github.com/go-martini/martini).
-- Router layer of [beego](https://github.com/astaxie/beego).
-- Logo is modified by [@insionng](https://github.com/insionng) based on [Tribal Dragon](http://xtremeyamazaki.deviantart.com/art/Tribal-Dragon-27005087).
-
-## License
-
-This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

+ 0 - 370
Godeps/_workspace/src/github.com/Unknwon/macaron/context_test.go

@@ -1,370 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bytes"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"net/url"
-	"strings"
-	"testing"
-	"time"
-
-	"github.com/Unknwon/com"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Context(t *testing.T) {
-	Convey("Do advanced encapsulation operations", t, func() {
-		m := Classic()
-		m.Use(Renderers(RenderOptions{
-			Directory: "fixtures/basic",
-		}, "fixtures/basic2"))
-
-		Convey("Get request body", func() {
-			m.Get("/body1", func(ctx *Context) {
-				data, err := ioutil.ReadAll(ctx.Req.Body().ReadCloser())
-				So(err, ShouldBeNil)
-				So(string(data), ShouldEqual, "This is my request body")
-			})
-			m.Get("/body2", func(ctx *Context) {
-				data, err := ctx.Req.Body().Bytes()
-				So(err, ShouldBeNil)
-				So(string(data), ShouldEqual, "This is my request body")
-			})
-			m.Get("/body3", func(ctx *Context) {
-				data, err := ctx.Req.Body().String()
-				So(err, ShouldBeNil)
-				So(data, ShouldEqual, "This is my request body")
-			})
-
-			for i := 1; i <= 3; i++ {
-				resp := httptest.NewRecorder()
-				req, err := http.NewRequest("GET", "/body"+com.ToStr(i), nil)
-				req.Body = ioutil.NopCloser(bytes.NewBufferString("This is my request body"))
-				So(err, ShouldBeNil)
-				m.ServeHTTP(resp, req)
-			}
-		})
-
-		Convey("Get remote IP address", func() {
-			m.Get("/remoteaddr", func(ctx *Context) string {
-				return ctx.RemoteAddr()
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/remoteaddr", nil)
-			req.RemoteAddr = "127.0.0.1:3333"
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "127.0.0.1")
-		})
-
-		Convey("Render HTML", func() {
-
-			Convey("Normal HTML", func() {
-				m.Get("/html", func(ctx *Context) {
-					ctx.HTML(304, "hello", "Unknwon") // 304 for logger test.
-				})
-
-				resp := httptest.NewRecorder()
-				req, err := http.NewRequest("GET", "/html", nil)
-				So(err, ShouldBeNil)
-				m.ServeHTTP(resp, req)
-				So(resp.Body.String(), ShouldEqual, "<h1>Hello Unknwon</h1>")
-			})
-
-			Convey("HTML template set", func() {
-				m.Get("/html2", func(ctx *Context) {
-					ctx.Data["Name"] = "Unknwon"
-					ctx.HTMLSet(200, "basic2", "hello2")
-				})
-
-				resp := httptest.NewRecorder()
-				req, err := http.NewRequest("GET", "/html2", nil)
-				So(err, ShouldBeNil)
-				m.ServeHTTP(resp, req)
-				So(resp.Body.String(), ShouldEqual, "<h1>Hello Unknwon</h1>")
-			})
-
-			Convey("With layout", func() {
-				m.Get("/layout", func(ctx *Context) {
-					ctx.HTML(200, "hello", "Unknwon", HTMLOptions{"layout"})
-				})
-
-				resp := httptest.NewRecorder()
-				req, err := http.NewRequest("GET", "/layout", nil)
-				So(err, ShouldBeNil)
-				m.ServeHTTP(resp, req)
-				So(resp.Body.String(), ShouldEqual, "head<h1>Hello Unknwon</h1>foot")
-			})
-		})
-
-		Convey("Parse from and query", func() {
-			m.Get("/query", func(ctx *Context) string {
-				var buf bytes.Buffer
-				buf.WriteString(ctx.QueryTrim("name") + " ")
-				buf.WriteString(ctx.QueryEscape("name") + " ")
-				buf.WriteString(com.ToStr(ctx.QueryInt("int")) + " ")
-				buf.WriteString(com.ToStr(ctx.QueryInt64("int64")) + " ")
-				buf.WriteString(com.ToStr(ctx.QueryFloat64("float64")) + " ")
-				return buf.String()
-			})
-			m.Get("/query2", func(ctx *Context) string {
-				var buf bytes.Buffer
-				buf.WriteString(strings.Join(ctx.QueryStrings("list"), ",") + " ")
-				buf.WriteString(strings.Join(ctx.QueryStrings("404"), ",") + " ")
-				return buf.String()
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/query?name=Unknwon&int=12&int64=123&float64=1.25", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Unknwon Unknwon 12 123 1.25 ")
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/query2?list=item1&list=item2", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "item1,item2  ")
-		})
-
-		Convey("URL parameter", func() {
-			m.Get("/:name/:int/:int64/:float64", func(ctx *Context) string {
-				var buf bytes.Buffer
-				ctx.SetParams("name", ctx.Params("name"))
-				buf.WriteString(ctx.Params(""))
-				buf.WriteString(ctx.Params(":name") + " ")
-				buf.WriteString(ctx.ParamsEscape(":name") + " ")
-				buf.WriteString(com.ToStr(ctx.ParamsInt(":int")) + " ")
-				buf.WriteString(com.ToStr(ctx.ParamsInt64(":int64")) + " ")
-				buf.WriteString(com.ToStr(ctx.ParamsFloat64(":float64")) + " ")
-				return buf.String()
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/user/1/13/1.24", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "user user 1 13 1.24 ")
-		})
-
-		Convey("Get file", func() {
-			m.Get("/getfile", func(ctx *Context) {
-				ctx.GetFile("hi")
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/getfile", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-		})
-
-		Convey("Set and get cookie", func() {
-			m.Get("/set", func(ctx *Context) {
-				ctx.SetCookie("user", "Unknwon", 1, "/", "localhost", true, true)
-				ctx.SetCookie("user", "Unknwon", int32(1), "/", "localhost", 1)
-				ctx.SetCookie("user", "Unknwon", int64(1))
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/set", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Header().Get("Set-Cookie"), ShouldEqual, "user=Unknwon; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure")
-
-			m.Get("/get", func(ctx *Context) string {
-				ctx.GetCookie("404")
-				So(ctx.GetCookieInt("uid"), ShouldEqual, 1)
-				So(ctx.GetCookieInt64("uid"), ShouldEqual, 1)
-				So(ctx.GetCookieFloat64("balance"), ShouldEqual, 1.25)
-				return ctx.GetCookie("user")
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/get", nil)
-			So(err, ShouldBeNil)
-			req.Header.Set("Cookie", "user=Unknwon; uid=1; balance=1.25")
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Unknwon")
-		})
-
-		Convey("Set and get secure cookie", func() {
-			m.SetDefaultCookieSecret("macaron")
-			m.Get("/set", func(ctx *Context) {
-				ctx.SetSecureCookie("user", "Unknwon", 1)
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/set", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-
-			cookie := resp.Header().Get("Set-Cookie")
-
-			m.Get("/get", func(ctx *Context) string {
-				name, ok := ctx.GetSecureCookie("user")
-				So(ok, ShouldBeTrue)
-				return name
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/get", nil)
-			So(err, ShouldBeNil)
-			req.Header.Set("Cookie", cookie)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Unknwon")
-		})
-
-		Convey("Serve files", func() {
-			m.Get("/file", func(ctx *Context) {
-				ctx.ServeFile("fixtures/custom_funcs/index.tmpl")
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/file", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}")
-
-			m.Get("/file2", func(ctx *Context) {
-				ctx.ServeFile("fixtures/custom_funcs/index.tmpl", "ok.tmpl")
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/file2", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}")
-		})
-
-		Convey("Serve file content", func() {
-			m.Get("/file", func(ctx *Context) {
-				ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl")
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/file", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}")
-
-			m.Get("/file2", func(ctx *Context) {
-				ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl", "ok.tmpl")
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/file2", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}")
-
-			m.Get("/file3", func(ctx *Context) {
-				ctx.ServeFileContent("404.tmpl")
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/file3", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "open 404.tmpl: no such file or directory\n")
-			So(resp.Code, ShouldEqual, 500)
-		})
-
-		Convey("Serve content", func() {
-			m.Get("/content", func(ctx *Context) {
-				ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!")))
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/content", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Hello world!")
-
-			m.Get("/content2", func(ctx *Context) {
-				ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!")), time.Now())
-			})
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "/content2", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Hello world!")
-		})
-	})
-}
-
-func Test_Context_Render(t *testing.T) {
-	Convey("Invalid render", t, func() {
-		defer func() {
-			So(recover(), ShouldNotBeNil)
-		}()
-
-		m := New()
-		m.Get("/", func(ctx *Context) {
-			ctx.HTML(200, "hey")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-	})
-}
-
-func Test_Context_Redirect(t *testing.T) {
-	Convey("Context with default redirect", t, func() {
-		url, err := url.Parse("http://localhost/path/one")
-		So(err, ShouldBeNil)
-		resp := httptest.NewRecorder()
-		req := http.Request{
-			Method: "GET",
-			URL:    url,
-		}
-		ctx := &Context{
-			Req:  Request{&req},
-			Resp: NewResponseWriter(resp),
-			Data: make(map[string]interface{}),
-		}
-		ctx.Redirect("two")
-
-		So(resp.Code, ShouldEqual, http.StatusFound)
-		So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two")
-	})
-
-	Convey("Context with custom redirect", t, func() {
-		url, err := url.Parse("http://localhost/path/one")
-		So(err, ShouldBeNil)
-		resp := httptest.NewRecorder()
-		req := http.Request{
-			Method: "GET",
-			URL:    url,
-		}
-		ctx := &Context{
-			Req:  Request{&req},
-			Resp: NewResponseWriter(resp),
-			Data: make(map[string]interface{}),
-		}
-		ctx.Redirect("two", 307)
-
-		So(resp.Code, ShouldEqual, http.StatusTemporaryRedirect)
-		So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two")
-	})
-}

+ 0 - 81
Godeps/_workspace/src/github.com/Unknwon/macaron/gzip.go

@@ -1,81 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bufio"
-	"compress/gzip"
-	"fmt"
-	"net"
-	"net/http"
-	"strings"
-)
-
-const (
-	HeaderAcceptEncoding  = "Accept-Encoding"
-	HeaderContentEncoding = "Content-Encoding"
-	HeaderContentLength   = "Content-Length"
-	HeaderContentType     = "Content-Type"
-	HeaderVary            = "Vary"
-)
-
-// Gziper returns a Handler that adds gzip compression to all requests.
-// Make sure to include the Gzip middleware above other middleware
-// that alter the response body (like the render middleware).
-func Gziper() Handler {
-	return func(ctx *Context) {
-		if !strings.Contains(ctx.Req.Header.Get(HeaderAcceptEncoding), "gzip") {
-			return
-		}
-
-		headers := ctx.Resp.Header()
-		headers.Set(HeaderContentEncoding, "gzip")
-		headers.Set(HeaderVary, HeaderAcceptEncoding)
-
-		gz := gzip.NewWriter(ctx.Resp)
-		defer gz.Close()
-
-		gzw := gzipResponseWriter{gz, ctx.Resp}
-		ctx.Resp = gzw
-		ctx.MapTo(gzw, (*http.ResponseWriter)(nil))
-
-		ctx.Next()
-
-		// delete content length after we know we have been written to
-		gzw.Header().Del("Content-Length")
-	}
-}
-
-type gzipResponseWriter struct {
-	w *gzip.Writer
-	ResponseWriter
-}
-
-func (grw gzipResponseWriter) Write(p []byte) (int, error) {
-	if len(grw.Header().Get(HeaderContentType)) == 0 {
-		grw.Header().Set(HeaderContentType, http.DetectContentType(p))
-	}
-
-	return grw.w.Write(p)
-}
-
-func (grw gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	hijacker, ok := grw.ResponseWriter.(http.Hijacker)
-	if !ok {
-		return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
-	}
-	return hijacker.Hijack()
-}

+ 0 - 65
Godeps/_workspace/src/github.com/Unknwon/macaron/gzip_test.go

@@ -1,65 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"strings"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Gzip(t *testing.T) {
-	Convey("Gzip response content", t, func() {
-		before := false
-
-		m := New()
-		m.Use(Gziper())
-		m.Use(func(r http.ResponseWriter) {
-			r.(ResponseWriter).Before(func(rw ResponseWriter) {
-				before = true
-			})
-		})
-		m.Get("/", func() string { return "hello wolrd!" })
-
-		// Not yet gzip.
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		_, ok := resp.HeaderMap[HeaderContentEncoding]
-		So(ok, ShouldBeFalse)
-
-		ce := resp.Header().Get(HeaderContentEncoding)
-		So(strings.EqualFold(ce, "gzip"), ShouldBeFalse)
-
-		// Gzip now.
-		resp = httptest.NewRecorder()
-		req.Header.Set(HeaderAcceptEncoding, "gzip")
-		m.ServeHTTP(resp, req)
-
-		_, ok = resp.HeaderMap[HeaderContentEncoding]
-		So(ok, ShouldBeTrue)
-
-		ce = resp.Header().Get(HeaderContentEncoding)
-		So(strings.EqualFold(ce, "gzip"), ShouldBeTrue)
-
-		So(before, ShouldBeTrue)
-	})
-}

+ 0 - 4
Godeps/_workspace/src/github.com/Unknwon/macaron/inject/README.md

@@ -1,4 +0,0 @@
-inject
-======
-
-Dependency injection for go

+ 0 - 1
Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.goconvey

@@ -1 +0,0 @@
-ignore

+ 0 - 174
Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject_test.go

@@ -1,174 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package inject_test
-
-import (
-	"fmt"
-	"reflect"
-	"testing"
-
-	"github.com/Unknwon/macaron/inject"
-)
-
-type SpecialString interface {
-}
-
-type TestStruct struct {
-	Dep1 string        `inject:"t" json:"-"`
-	Dep2 SpecialString `inject`
-	Dep3 string
-}
-
-type Greeter struct {
-	Name string
-}
-
-func (g *Greeter) String() string {
-	return "Hello, My name is" + g.Name
-}
-
-/* Test Helpers */
-func expect(t *testing.T, a interface{}, b interface{}) {
-	if a != b {
-		t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
-	}
-}
-
-func refute(t *testing.T, a interface{}, b interface{}) {
-	if a == b {
-		t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a))
-	}
-}
-
-func Test_InjectorInvoke(t *testing.T) {
-	injector := inject.New()
-	expect(t, injector == nil, false)
-
-	dep := "some dependency"
-	injector.Map(dep)
-	dep2 := "another dep"
-	injector.MapTo(dep2, (*SpecialString)(nil))
-	dep3 := make(chan *SpecialString)
-	dep4 := make(chan *SpecialString)
-	typRecv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(dep3).Elem())
-	typSend := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(dep4).Elem())
-	injector.Set(typRecv, reflect.ValueOf(dep3))
-	injector.Set(typSend, reflect.ValueOf(dep4))
-
-	_, err := injector.Invoke(func(d1 string, d2 SpecialString, d3 <-chan *SpecialString, d4 chan<- *SpecialString) {
-		expect(t, d1, dep)
-		expect(t, d2, dep2)
-		expect(t, reflect.TypeOf(d3).Elem(), reflect.TypeOf(dep3).Elem())
-		expect(t, reflect.TypeOf(d4).Elem(), reflect.TypeOf(dep4).Elem())
-		expect(t, reflect.TypeOf(d3).ChanDir(), reflect.RecvDir)
-		expect(t, reflect.TypeOf(d4).ChanDir(), reflect.SendDir)
-	})
-
-	expect(t, err, nil)
-}
-
-func Test_InjectorInvokeReturnValues(t *testing.T) {
-	injector := inject.New()
-	expect(t, injector == nil, false)
-
-	dep := "some dependency"
-	injector.Map(dep)
-	dep2 := "another dep"
-	injector.MapTo(dep2, (*SpecialString)(nil))
-
-	result, err := injector.Invoke(func(d1 string, d2 SpecialString) string {
-		expect(t, d1, dep)
-		expect(t, d2, dep2)
-		return "Hello world"
-	})
-
-	expect(t, result[0].String(), "Hello world")
-	expect(t, err, nil)
-}
-
-func Test_InjectorApply(t *testing.T) {
-	injector := inject.New()
-
-	injector.Map("a dep").MapTo("another dep", (*SpecialString)(nil))
-
-	s := TestStruct{}
-	err := injector.Apply(&s)
-	expect(t, err, nil)
-
-	expect(t, s.Dep1, "a dep")
-	expect(t, s.Dep2, "another dep")
-}
-
-func Test_InterfaceOf(t *testing.T) {
-	iType := inject.InterfaceOf((*SpecialString)(nil))
-	expect(t, iType.Kind(), reflect.Interface)
-
-	iType = inject.InterfaceOf((**SpecialString)(nil))
-	expect(t, iType.Kind(), reflect.Interface)
-
-	// Expecting nil
-	defer func() {
-		rec := recover()
-		refute(t, rec, nil)
-	}()
-	iType = inject.InterfaceOf((*testing.T)(nil))
-}
-
-func Test_InjectorSet(t *testing.T) {
-	injector := inject.New()
-	typ := reflect.TypeOf("string")
-	typSend := reflect.ChanOf(reflect.SendDir, typ)
-	typRecv := reflect.ChanOf(reflect.RecvDir, typ)
-
-	// instantiating unidirectional channels is not possible using reflect
-	// http://golang.org/src/pkg/reflect/value.go?s=60463:60504#L2064
-	chanRecv := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0)
-	chanSend := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0)
-
-	injector.Set(typSend, chanSend)
-	injector.Set(typRecv, chanRecv)
-
-	expect(t, injector.GetVal(typSend).IsValid(), true)
-	expect(t, injector.GetVal(typRecv).IsValid(), true)
-	expect(t, injector.GetVal(chanSend.Type()).IsValid(), false)
-}
-
-func Test_InjectorGet(t *testing.T) {
-	injector := inject.New()
-
-	injector.Map("some dependency")
-
-	expect(t, injector.GetVal(reflect.TypeOf("string")).IsValid(), true)
-	expect(t, injector.GetVal(reflect.TypeOf(11)).IsValid(), false)
-}
-
-func Test_InjectorSetParent(t *testing.T) {
-	injector := inject.New()
-	injector.MapTo("another dep", (*SpecialString)(nil))
-
-	injector2 := inject.New()
-	injector2.SetParent(injector)
-
-	expect(t, injector2.GetVal(inject.InterfaceOf((*SpecialString)(nil))).IsValid(), true)
-}
-
-func TestInjectImplementors(t *testing.T) {
-	injector := inject.New()
-	g := &Greeter{"Jeremy"}
-	injector.Map(g)
-
-	expect(t, injector.GetVal(inject.InterfaceOf((*fmt.Stringer)(nil))).IsValid(), true)
-}

+ 0 - 67
Godeps/_workspace/src/github.com/Unknwon/macaron/logger_test.go

@@ -1,67 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bytes"
-	"log"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/Unknwon/com"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Logger(t *testing.T) {
-	Convey("Global logger", t, func() {
-		buf := bytes.NewBufferString("")
-		m := New()
-		m.Map(log.New(buf, "[Macaron] ", 0))
-		m.Use(Logger())
-		m.Use(func(res http.ResponseWriter) {
-			res.WriteHeader(http.StatusNotFound)
-		})
-		m.Get("/", func() {})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusNotFound)
-		So(len(buf.String()), ShouldBeGreaterThan, 0)
-	})
-
-	if ColorLog {
-		Convey("Color console output", t, func() {
-			m := Classic()
-			m.Get("/:code:int", func(ctx *Context) (int, string) {
-				return ctx.ParamsInt(":code"), ""
-			})
-
-			// Just for testing if logger would capture.
-			codes := []int{200, 201, 202, 301, 302, 304, 401, 403, 404, 500}
-			for _, code := range codes {
-				resp := httptest.NewRecorder()
-				req, err := http.NewRequest("GET", "http://localhost:4000/"+com.ToStr(code), nil)
-				So(err, ShouldBeNil)
-				m.ServeHTTP(resp, req)
-				So(resp.Code, ShouldEqual, code)
-			}
-		})
-	}
-}

+ 0 - 218
Godeps/_workspace/src/github.com/Unknwon/macaron/macaron_test.go

@@ -1,218 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"testing"
-	"time"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Version(t *testing.T) {
-	Convey("Get version", t, func() {
-		So(Version(), ShouldEqual, _VERSION)
-	})
-}
-
-func Test_New(t *testing.T) {
-	Convey("Initialize a new instance", t, func() {
-		So(New(), ShouldNotBeNil)
-	})
-
-	Convey("Just test that Run doesn't bomb", t, func() {
-		go New().Run()
-		time.Sleep(1 * time.Second)
-		os.Setenv("PORT", "4001")
-		go New().Run("0.0.0.0")
-		go New().Run(4002)
-		go New().Run("0.0.0.0", 4003)
-	})
-}
-
-func Test_Macaron_Before(t *testing.T) {
-	Convey("Register before handlers", t, func() {
-		m := New()
-		m.Before(func(rw http.ResponseWriter, req *http.Request) bool {
-			return false
-		})
-		m.Before(func(rw http.ResponseWriter, req *http.Request) bool {
-			return true
-		})
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-	})
-}
-
-func Test_Macaron_ServeHTTP(t *testing.T) {
-	Convey("Serve HTTP requests", t, func() {
-		result := ""
-		m := New()
-		m.Use(func(c *Context) {
-			result += "foo"
-			c.Next()
-			result += "ban"
-		})
-		m.Use(func(c *Context) {
-			result += "bar"
-			c.Next()
-			result += "baz"
-		})
-		m.Get("/", func() {})
-		m.Action(func(res http.ResponseWriter, req *http.Request) {
-			result += "bat"
-			res.WriteHeader(http.StatusBadRequest)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(result, ShouldEqual, "foobarbatbazban")
-		So(resp.Code, ShouldEqual, http.StatusBadRequest)
-	})
-}
-
-func Test_Macaron_Handlers(t *testing.T) {
-	Convey("Add custom handlers", t, func() {
-		result := ""
-		batman := func(c *Context) {
-			result += "batman!"
-		}
-
-		m := New()
-		m.Use(func(c *Context) {
-			result += "foo"
-			c.Next()
-			result += "ban"
-		})
-		m.Handlers(
-			batman,
-			batman,
-			batman,
-		)
-
-		Convey("Add not callable function", func() {
-			defer func() {
-				So(recover(), ShouldNotBeNil)
-			}()
-			m.Use("shit")
-		})
-
-		m.Get("/", func() {})
-		m.Action(func(res http.ResponseWriter, req *http.Request) {
-			result += "bat"
-			res.WriteHeader(http.StatusBadRequest)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(result, ShouldEqual, "batman!batman!batman!bat")
-		So(resp.Code, ShouldEqual, http.StatusBadRequest)
-	})
-}
-
-func Test_Macaron_EarlyWrite(t *testing.T) {
-	Convey("Write early content to response", t, func() {
-		result := ""
-		m := New()
-		m.Use(func(res http.ResponseWriter) {
-			result += "foobar"
-			res.Write([]byte("Hello world"))
-		})
-		m.Use(func() {
-			result += "bat"
-		})
-		m.Get("/", func() {})
-		m.Action(func(res http.ResponseWriter) {
-			result += "baz"
-			res.WriteHeader(http.StatusBadRequest)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(result, ShouldEqual, "foobar")
-		So(resp.Code, ShouldEqual, http.StatusOK)
-	})
-}
-
-func Test_Macaron_Written(t *testing.T) {
-	Convey("Written sign", t, func() {
-		resp := httptest.NewRecorder()
-		m := New()
-		m.Handlers(func(res http.ResponseWriter) {
-			res.WriteHeader(http.StatusOK)
-		})
-
-		ctx := m.createContext(resp, &http.Request{Method: "GET"})
-		So(ctx.Written(), ShouldBeFalse)
-
-		ctx.run()
-		So(ctx.Written(), ShouldBeTrue)
-	})
-}
-
-func Test_Macaron_Basic_NoRace(t *testing.T) {
-	Convey("Make sure no race between requests", t, func() {
-		m := New()
-		handlers := []Handler{func() {}, func() {}}
-		// Ensure append will not realloc to trigger the race condition
-		m.handlers = handlers[:1]
-		m.Get("/", func() {})
-		req, _ := http.NewRequest("GET", "/", nil)
-		for i := 0; i < 2; i++ {
-			go func() {
-				resp := httptest.NewRecorder()
-				m.ServeHTTP(resp, req)
-			}()
-		}
-	})
-}
-
-func Test_SetENV(t *testing.T) {
-	Convey("Get and save environment variable", t, func() {
-		tests := []struct {
-			in  string
-			out string
-		}{
-			{"", "development"},
-			{"not_development", "not_development"},
-		}
-
-		for _, test := range tests {
-			setENV(test.in)
-			So(Env, ShouldEqual, test.out)
-		}
-	})
-}
-
-func Test_Config(t *testing.T) {
-	Convey("Set and get configuration object", t, func() {
-		So(Config(), ShouldNotBeNil)
-		cfg, err := SetConfig([]byte(""))
-		So(err, ShouldBeNil)
-		So(cfg, ShouldNotBeNil)
-	})
-}

+ 0 - 74
Godeps/_workspace/src/github.com/Unknwon/macaron/recovery_test.go

@@ -1,74 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bytes"
-	"log"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Recovery(t *testing.T) {
-	Convey("Recovery from panic", t, func() {
-		buf := bytes.NewBufferString("")
-		setENV(DEV)
-
-		m := New()
-		m.Map(log.New(buf, "[Macaron] ", 0))
-		m.Use(func(res http.ResponseWriter, req *http.Request) {
-			res.Header().Set("Content-Type", "unpredictable")
-		})
-		m.Use(Recovery())
-		m.Use(func(res http.ResponseWriter, req *http.Request) {
-			panic("here is a panic!")
-		})
-		m.Get("/", func() {})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusInternalServerError)
-		So(resp.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html")
-		So(buf.String(), ShouldNotBeEmpty)
-	})
-
-	Convey("Revocery panic to another response writer", t, func() {
-		resp := httptest.NewRecorder()
-		resp2 := httptest.NewRecorder()
-		setENV(DEV)
-
-		m := New()
-		m.Use(Recovery())
-		m.Use(func(c *Context) {
-			c.MapTo(resp2, (*http.ResponseWriter)(nil))
-			panic("here is a panic!")
-		})
-		m.Get("/", func() {})
-
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp2.Code, ShouldEqual, http.StatusInternalServerError)
-		So(resp2.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html")
-		So(resp2.Body.Len(), ShouldBeGreaterThan, 0)
-	})
-}

+ 0 - 581
Godeps/_workspace/src/github.com/Unknwon/macaron/render_test.go

@@ -1,581 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"encoding/xml"
-	"html/template"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-type Greeting struct {
-	One string `json:"one"`
-	Two string `json:"two"`
-}
-
-type GreetingXML struct {
-	XMLName xml.Name `xml:"greeting"`
-	One     string   `xml:"one,attr"`
-	Two     string   `xml:"two,attr"`
-}
-
-func Test_Render_JSON(t *testing.T) {
-	Convey("Render JSON", t, func() {
-		m := Classic()
-		m.Use(Renderer())
-		m.Get("/foobar", func(r Render) {
-			r.JSON(300, Greeting{"hello", "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`)
-	})
-
-	Convey("Render JSON with prefix", t, func() {
-		m := Classic()
-		prefix := ")]}',\n"
-		m.Use(Renderer(RenderOptions{
-			PrefixJSON: []byte(prefix),
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.JSON(300, Greeting{"hello", "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, prefix+`{"one":"hello","two":"world"}`)
-	})
-
-	Convey("Render Indented JSON", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			IndentJSON: true,
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.JSON(300, Greeting{"hello", "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, `{
-  "one": "hello",
-  "two": "world"
-}`)
-	})
-
-	Convey("Render JSON and return string", t, func() {
-		m := Classic()
-		m.Use(Renderer())
-		m.Get("/foobar", func(r Render) {
-			result, err := r.JSONString(Greeting{"hello", "world"})
-			So(err, ShouldBeNil)
-			So(result, ShouldEqual, `{"one":"hello","two":"world"}`)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-	})
-
-	Convey("Render with charset JSON", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Charset: "foobar",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.JSON(300, Greeting{"hello", "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentJSON+"; charset=foobar")
-		So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`)
-	})
-}
-
-func Test_Render_XML(t *testing.T) {
-	Convey("Render XML", t, func() {
-		m := Classic()
-		m.Use(Renderer())
-		m.Get("/foobar", func(r Render) {
-			r.XML(300, GreetingXML{One: "hello", Two: "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, `<greeting one="hello" two="world"></greeting>`)
-	})
-
-	Convey("Render XML with prefix", t, func() {
-		m := Classic()
-		prefix := ")]}',\n"
-		m.Use(Renderer(RenderOptions{
-			PrefixXML: []byte(prefix),
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.XML(300, GreetingXML{One: "hello", Two: "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, prefix+`<greeting one="hello" two="world"></greeting>`)
-	})
-
-	Convey("Render Indented XML", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			IndentXML: true,
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.XML(300, GreetingXML{One: "hello", Two: "world"})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusMultipleChoices)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentXML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, `<greeting one="hello" two="world"></greeting>`)
-	})
-}
-
-func Test_Render_HTML(t *testing.T) {
-	Convey("Render HTML", t, func() {
-		m := Classic()
-		m.Use(Renderers(RenderOptions{
-			Directory: "fixtures/basic",
-		}, "fixtures/basic2"))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "hello", "jeremy")
-			r.SetTemplatePath("", "fixtures/basic2")
-		})
-		m.Get("/foobar2", func(r Render) {
-			if r.HasTemplateSet("basic2") {
-				r.HTMLSet(200, "basic2", "hello", "jeremy")
-			}
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "<h1>Hello jeremy</h1>")
-
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("GET", "/foobar2", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "<h1>What's up, jeremy</h1>")
-
-		Convey("Change render templates path", func() {
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/foobar", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-
-			So(resp.Code, ShouldEqual, http.StatusOK)
-			So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-			So(resp.Body.String(), ShouldEqual, "<h1>What's up, jeremy</h1>")
-		})
-	})
-
-	Convey("Render HTML and return string", t, func() {
-		m := Classic()
-		m.Use(Renderers(RenderOptions{
-			Directory: "fixtures/basic",
-		}, "basic2:fixtures/basic2"))
-		m.Get("/foobar", func(r Render) {
-			result, err := r.HTMLString("hello", "jeremy")
-			So(err, ShouldBeNil)
-			So(result, ShouldEqual, "<h1>Hello jeremy</h1>")
-		})
-		m.Get("/foobar2", func(r Render) {
-			result, err := r.HTMLSetString("basic2", "hello", "jeremy")
-			So(err, ShouldBeNil)
-			So(result, ShouldEqual, "<h1>What's up, jeremy</h1>")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("GET", "/foobar2", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-	})
-
-	Convey("Render with nested HTML", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "admin/index", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "<h1>Admin jeremy</h1>")
-	})
-
-	Convey("Render bad HTML", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "nope", nil)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusInternalServerError)
-		So(resp.Body.String(), ShouldEqual, "html/template: \"nope\" is undefined\n")
-	})
-
-	Convey("Invalid template set", t, func() {
-		Convey("Empty template set argument", func() {
-			defer func() {
-				So(recover(), ShouldNotBeNil)
-			}()
-			m := Classic()
-			m.Use(Renderers(RenderOptions{
-				Directory: "fixtures/basic",
-			}, ""))
-		})
-
-		Convey("Bad template set path", func() {
-			defer func() {
-				So(recover(), ShouldNotBeNil)
-			}()
-			m := Classic()
-			m.Use(Renderers(RenderOptions{
-				Directory: "fixtures/basic",
-			}, "404"))
-		})
-	})
-}
-
-func Test_Render_XHTML(t *testing.T) {
-	Convey("Render XHTML", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory:       "fixtures/basic",
-			HTMLContentType: ContentXHTML,
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "hello", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentXHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "<h1>Hello jeremy</h1>")
-	})
-}
-
-func Test_Render_Extensions(t *testing.T) {
-	Convey("Render with extensions", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory:  "fixtures/basic",
-			Extensions: []string{".tmpl", ".html"},
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "hypertext", nil)
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "Hypertext!")
-	})
-}
-
-func Test_Render_Funcs(t *testing.T) {
-	Convey("Render with functions", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/custom_funcs",
-			Funcs: []template.FuncMap{
-				{
-					"myCustomFunc": func() string {
-						return "My custom function"
-					},
-				},
-			},
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "index", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Body.String(), ShouldEqual, "My custom function")
-	})
-}
-
-func Test_Render_Layout(t *testing.T) {
-	Convey("Render with layout", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-			Layout:    "layout",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "content", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Body.String(), ShouldEqual, "head<h1>jeremy</h1>foot")
-	})
-
-	Convey("Render with current layout", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-			Layout:    "current_layout",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "content", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Body.String(), ShouldEqual, "content head<h1>jeremy</h1>content foot")
-	})
-
-	Convey("Render with override layout", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-			Layout:    "layout",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "content", "jeremy", HTMLOptions{
-				Layout: "another_layout",
-			})
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "another head<h1>jeremy</h1>another foot")
-	})
-}
-
-func Test_Render_Delimiters(t *testing.T) {
-	Convey("Render with delimiters", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Delims:    Delims{"{[{", "}]}"},
-			Directory: "fixtures/basic",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "delims", "jeremy")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentHTML+"; charset=UTF-8")
-		So(resp.Body.String(), ShouldEqual, "<h1>Hello jeremy</h1>")
-	})
-}
-
-func Test_Render_BinaryData(t *testing.T) {
-	Convey("Render binary data", t, func() {
-		m := Classic()
-		m.Use(Renderer())
-		m.Get("/foobar", func(r Render) {
-			r.RawData(200, []byte("hello there"))
-		})
-		m.Get("/foobar2", func(r Render) {
-			r.RenderData(200, []byte("hello there"))
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, ContentBinary)
-		So(resp.Body.String(), ShouldEqual, "hello there")
-
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("GET", "/foobar2", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, CONTENT_PLAIN)
-		So(resp.Body.String(), ShouldEqual, "hello there")
-	})
-
-	Convey("Render binary data with mime type", t, func() {
-		m := Classic()
-		m.Use(Renderer())
-		m.Get("/foobar", func(r Render) {
-			r.RW().Header().Set(ContentType, "image/jpeg")
-			r.RawData(200, []byte("..jpeg data.."))
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/foobar", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get(ContentType), ShouldEqual, "image/jpeg")
-		So(resp.Body.String(), ShouldEqual, "..jpeg data..")
-	})
-}
-
-func Test_Render_Status(t *testing.T) {
-	Convey("Render with status 204", t, func() {
-		resp := httptest.NewRecorder()
-		r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()}
-		r.Status(204)
-		So(resp.Code, ShouldEqual, http.StatusNoContent)
-	})
-
-	Convey("Render with status 404", t, func() {
-		resp := httptest.NewRecorder()
-		r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()}
-		r.Error(404)
-		So(resp.Code, ShouldEqual, http.StatusNotFound)
-	})
-
-	Convey("Render with status 500", t, func() {
-		resp := httptest.NewRecorder()
-		r := TplRender{resp, newTemplateSet(), &RenderOptions{}, "", time.Now()}
-		r.Error(500)
-		So(resp.Code, ShouldEqual, http.StatusInternalServerError)
-	})
-}
-
-func Test_Render_NoRace(t *testing.T) {
-	Convey("Make sure render has no race", t, func() {
-		m := Classic()
-		m.Use(Renderer(RenderOptions{
-			Directory: "fixtures/basic",
-		}))
-		m.Get("/foobar", func(r Render) {
-			r.HTML(200, "hello", "world")
-		})
-
-		done := make(chan bool)
-		doreq := func() {
-			resp := httptest.NewRecorder()
-			req, _ := http.NewRequest("GET", "/foobar", nil)
-			m.ServeHTTP(resp, req)
-			done <- true
-		}
-		// Run two requests to check there is no race condition
-		go doreq()
-		go doreq()
-		<-done
-		<-done
-	})
-}
-
-func Test_GetExt(t *testing.T) {
-	Convey("Get extension", t, func() {
-		So(GetExt("test"), ShouldBeBlank)
-		So(GetExt("test.tmpl"), ShouldEqual, ".tmpl")
-		So(GetExt("test.go.tmpl"), ShouldEqual, ".go.tmpl")
-	})
-}

+ 0 - 188
Godeps/_workspace/src/github.com/Unknwon/macaron/response_writer_test.go

@@ -1,188 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bufio"
-	"io"
-	"net"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-	"time"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-type closeNotifyingRecorder struct {
-	*httptest.ResponseRecorder
-	closed chan bool
-}
-
-func newCloseNotifyingRecorder() *closeNotifyingRecorder {
-	return &closeNotifyingRecorder{
-		httptest.NewRecorder(),
-		make(chan bool, 1),
-	}
-}
-
-func (c *closeNotifyingRecorder) close() {
-	c.closed <- true
-}
-
-func (c *closeNotifyingRecorder) CloseNotify() <-chan bool {
-	return c.closed
-}
-
-type hijackableResponse struct {
-	Hijacked bool
-}
-
-func newHijackableResponse() *hijackableResponse {
-	return &hijackableResponse{}
-}
-
-func (h *hijackableResponse) Header() http.Header           { return nil }
-func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil }
-func (h *hijackableResponse) WriteHeader(code int)          {}
-func (h *hijackableResponse) Flush()                        {}
-func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-	h.Hijacked = true
-	return nil, nil, nil
-}
-
-func Test_ResponseWriter(t *testing.T) {
-	Convey("Write string to response writer", t, func() {
-		resp := httptest.NewRecorder()
-		rw := NewResponseWriter(resp)
-		rw.Write([]byte("Hello world"))
-
-		So(resp.Code, ShouldEqual, rw.Status())
-		So(resp.Body.String(), ShouldEqual, "Hello world")
-		So(rw.Status(), ShouldEqual, http.StatusOK)
-		So(rw.Size(), ShouldEqual, 11)
-		So(rw.Written(), ShouldBeTrue)
-	})
-
-	Convey("Write strings to response writer", t, func() {
-		resp := httptest.NewRecorder()
-		rw := NewResponseWriter(resp)
-		rw.Write([]byte("Hello world"))
-		rw.Write([]byte("foo bar bat baz"))
-
-		So(resp.Code, ShouldEqual, rw.Status())
-		So(resp.Body.String(), ShouldEqual, "Hello worldfoo bar bat baz")
-		So(rw.Status(), ShouldEqual, http.StatusOK)
-		So(rw.Size(), ShouldEqual, 26)
-		So(rw.Written(), ShouldBeTrue)
-	})
-
-	Convey("Write header to response writer", t, func() {
-		resp := httptest.NewRecorder()
-		rw := NewResponseWriter(resp)
-		rw.WriteHeader(http.StatusNotFound)
-
-		So(resp.Code, ShouldEqual, rw.Status())
-		So(resp.Body.String(), ShouldBeBlank)
-		So(rw.Status(), ShouldEqual, http.StatusNotFound)
-		So(rw.Size(), ShouldEqual, 0)
-	})
-
-	Convey("Write before response write", t, func() {
-		result := ""
-		resp := httptest.NewRecorder()
-		rw := NewResponseWriter(resp)
-		rw.Before(func(ResponseWriter) {
-			result += "foo"
-		})
-		rw.Before(func(ResponseWriter) {
-			result += "bar"
-		})
-		rw.WriteHeader(http.StatusNotFound)
-
-		So(resp.Code, ShouldEqual, rw.Status())
-		So(resp.Body.String(), ShouldBeBlank)
-		So(rw.Status(), ShouldEqual, http.StatusNotFound)
-		So(rw.Size(), ShouldEqual, 0)
-		So(result, ShouldEqual, "barfoo")
-	})
-
-	Convey("Response writer with Hijack", t, func() {
-		hijackable := newHijackableResponse()
-		rw := NewResponseWriter(hijackable)
-		hijacker, ok := rw.(http.Hijacker)
-		So(ok, ShouldBeTrue)
-		_, _, err := hijacker.Hijack()
-		So(err, ShouldBeNil)
-		So(hijackable.Hijacked, ShouldBeTrue)
-	})
-
-	Convey("Response writer with bad Hijack", t, func() {
-		hijackable := new(http.ResponseWriter)
-		rw := NewResponseWriter(*hijackable)
-		hijacker, ok := rw.(http.Hijacker)
-		So(ok, ShouldBeTrue)
-		_, _, err := hijacker.Hijack()
-		So(err, ShouldNotBeNil)
-	})
-
-	Convey("Response writer with close notify", t, func() {
-		resp := newCloseNotifyingRecorder()
-		rw := NewResponseWriter(resp)
-		closed := false
-		notifier := rw.(http.CloseNotifier).CloseNotify()
-		resp.close()
-		select {
-		case <-notifier:
-			closed = true
-		case <-time.After(time.Second):
-		}
-		So(closed, ShouldBeTrue)
-	})
-
-	Convey("Response writer with flusher", t, func() {
-		resp := httptest.NewRecorder()
-		rw := NewResponseWriter(resp)
-		_, ok := rw.(http.Flusher)
-		So(ok, ShouldBeTrue)
-	})
-
-	Convey("Response writer with flusher handler", t, func() {
-		m := Classic()
-		m.Get("/events", func(w http.ResponseWriter, r *http.Request) {
-			f, ok := w.(http.Flusher)
-			So(ok, ShouldBeTrue)
-
-			w.Header().Set("Content-Type", "text/event-stream")
-			w.Header().Set("Cache-Control", "no-cache")
-			w.Header().Set("Connection", "keep-alive")
-
-			for i := 0; i < 2; i++ {
-				time.Sleep(10 * time.Millisecond)
-				io.WriteString(w, "data: Hello\n\n")
-				f.Flush()
-			}
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/events", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Body.String(), ShouldEqual, "data: Hello\n\ndata: Hello\n\n")
-	})
-}

+ 0 - 69
Godeps/_workspace/src/github.com/Unknwon/macaron/return_handler_test.go

@@ -1,69 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Return_Handler(t *testing.T) {
-	Convey("Return with status and body", t, func() {
-		m := Classic()
-		m.Get("/", func() (int, string) {
-			return 418, "i'm a teapot"
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusTeapot)
-		So(resp.Body.String(), ShouldEqual, "i'm a teapot")
-	})
-
-	Convey("Return with pointer", t, func() {
-		m := Classic()
-		m.Get("/", func() *string {
-			str := "hello world"
-			return &str
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Body.String(), ShouldEqual, "hello world")
-	})
-
-	Convey("Return with byte slice", t, func() {
-		m := Classic()
-		m.Get("/", func() []byte {
-			return []byte("hello world")
-		})
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Body.String(), ShouldEqual, "hello world")
-	})
-}

+ 0 - 199
Godeps/_workspace/src/github.com/Unknwon/macaron/router_test.go

@@ -1,199 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Router_Handle(t *testing.T) {
-	Convey("Register all HTTP methods routes", t, func() {
-		m := Classic()
-		m.Get("/get", func() string {
-			return "GET"
-		})
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/get", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "GET")
-
-		m.Patch("/patch", func() string {
-			return "PATCH"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("PATCH", "/patch", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "PATCH")
-
-		m.Post("/post", func() string {
-			return "POST"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("POST", "/post", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "POST")
-
-		m.Put("/put", func() string {
-			return "PUT"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("PUT", "/put", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "PUT")
-
-		m.Delete("/delete", func() string {
-			return "DELETE"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("DELETE", "/delete", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "DELETE")
-
-		m.Options("/options", func() string {
-			return "OPTIONS"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("OPTIONS", "/options", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "OPTIONS")
-
-		m.Head("/head", func() string {
-			return "HEAD"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("HEAD", "/head", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "HEAD")
-
-		m.Any("/any", func() string {
-			return "ANY"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("GET", "/any", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "ANY")
-
-		m.Route("/route", "GET,POST", func() string {
-			return "ROUTE"
-		})
-		resp = httptest.NewRecorder()
-		req, err = http.NewRequest("POST", "/route", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "ROUTE")
-	})
-
-	Convey("Register all HTTP methods routes with combo", t, func() {
-		m := Classic()
-		m.SetURLPrefix("/prefix")
-		m.Use(Renderer())
-		m.Combo("/", func(ctx *Context) {
-			ctx.Data["prefix"] = "Prefix_"
-		}).
-			Get(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "GET" }).
-			Patch(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PATCH" }).
-			Post(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "POST" }).
-			Put(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PUT" }).
-			Delete(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "DELETE" }).
-			Options(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "OPTIONS" }).
-			Head(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "HEAD" })
-
-		for name := range _HTTP_METHODS {
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest(name, "/", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Body.String(), ShouldEqual, "Prefix_"+name)
-		}
-
-		defer func() {
-			So(recover(), ShouldNotBeNil)
-		}()
-		m.Combo("/").Get(func() {}).Get(nil)
-	})
-
-	Convey("Register duplicated routes", t, func() {
-		r := NewRouter()
-		r.Get("/")
-		r.Get("/")
-	})
-
-	Convey("Register invalid HTTP method", t, func() {
-		defer func() {
-			So(recover(), ShouldNotBeNil)
-		}()
-		r := NewRouter()
-		r.Handle("404", "/", nil)
-	})
-}
-
-func Test_Router_Group(t *testing.T) {
-	Convey("Register route group", t, func() {
-		m := Classic()
-		m.Group("/api", func() {
-			m.Group("/v1", func() {
-				m.Get("/list", func() string {
-					return "Well done!"
-				})
-			})
-		})
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/api/v1/list", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "Well done!")
-	})
-}
-
-func Test_Router_NotFound(t *testing.T) {
-	Convey("Custom not found handler", t, func() {
-		m := Classic()
-		m.Get("/", func() {})
-		m.NotFound(func() string {
-			return "Custom not found"
-		})
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/404", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "Custom not found")
-	})
-}
-
-func Test_Router_splat(t *testing.T) {
-	Convey("Register router with glob", t, func() {
-		m := Classic()
-		m.Get("/*", func(ctx *Context) string {
-			return ctx.Params("*")
-		})
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "/hahaha", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Body.String(), ShouldEqual, "hahaha")
-	})
-}

+ 0 - 246
Godeps/_workspace/src/github.com/Unknwon/macaron/static_test.go

@@ -1,246 +0,0 @@
-// Copyright 2013 Martini Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	"bytes"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"path"
-	"strings"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-var currentRoot, _ = os.Getwd()
-
-func Test_Static(t *testing.T) {
-	Convey("Serve static files", t, func() {
-		m := New()
-		m.Use(Static("./"))
-
-		resp := httptest.NewRecorder()
-		resp.Body = new(bytes.Buffer)
-		req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get("Expires"), ShouldBeBlank)
-		So(resp.Body.Len(), ShouldBeGreaterThan, 0)
-
-		Convey("Change static path", func() {
-			m.Get("/", func(ctx *Context) {
-				ctx.ChangeStaticPath("./", "inject")
-			})
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "/", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-
-			resp = httptest.NewRecorder()
-			resp.Body = new(bytes.Buffer)
-			req, err = http.NewRequest("GET", "http://localhost:4000/inject.go", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-			So(resp.Code, ShouldEqual, http.StatusOK)
-			So(resp.Header().Get("Expires"), ShouldBeBlank)
-			So(resp.Body.Len(), ShouldBeGreaterThan, 0)
-		})
-	})
-
-	Convey("Serve static files with local path", t, func() {
-		Root = os.TempDir()
-		f, err := ioutil.TempFile(Root, "static_content")
-		So(err, ShouldBeNil)
-		f.WriteString("Expected Content")
-		f.Close()
-
-		m := New()
-		m.Use(Static("."))
-
-		resp := httptest.NewRecorder()
-		resp.Body = new(bytes.Buffer)
-		req, err := http.NewRequest("GET", "http://localhost:4000/"+path.Base(strings.Replace(f.Name(), "\\", "/", -1)), nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Header().Get("Expires"), ShouldBeBlank)
-		So(resp.Body.String(), ShouldEqual, "Expected Content")
-	})
-
-	Convey("Serve static files with head", t, func() {
-		m := New()
-		m.Use(Static(currentRoot))
-
-		resp := httptest.NewRecorder()
-		resp.Body = new(bytes.Buffer)
-		req, err := http.NewRequest("HEAD", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(resp.Body.Len(), ShouldEqual, 0)
-	})
-
-	Convey("Serve static files as post", t, func() {
-		m := New()
-		m.Use(Static(currentRoot))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("POST", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldEqual, http.StatusNotFound)
-	})
-
-	Convey("Serve static files with bad directory", t, func() {
-		m := Classic()
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-		So(resp.Code, ShouldNotEqual, http.StatusOK)
-	})
-}
-
-func Test_Static_Options(t *testing.T) {
-	Convey("Serve static files with options logging", t, func() {
-		var buf bytes.Buffer
-		m := NewWithLogger(&buf)
-		opt := StaticOptions{}
-		m.Use(Static(currentRoot, opt))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n")
-
-		// Not disable logging.
-		m.Handlers()
-		buf.Reset()
-		opt.SkipLogging = true
-		m.Use(Static(currentRoot, opt))
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(buf.Len(), ShouldEqual, 0)
-	})
-
-	Convey("Serve static files with options serve index", t, func() {
-		var buf bytes.Buffer
-		m := NewWithLogger(&buf)
-		opt := StaticOptions{IndexFile: "macaron.go"}
-		m.Use(Static(currentRoot, opt))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n")
-	})
-
-	Convey("Serve static files with options prefix", t, func() {
-		var buf bytes.Buffer
-		m := NewWithLogger(&buf)
-		opt := StaticOptions{Prefix: "public"}
-		m.Use(Static(currentRoot, opt))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/public/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusOK)
-		So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n")
-	})
-
-	Convey("Serve static files with options expires", t, func() {
-		var buf bytes.Buffer
-		m := NewWithLogger(&buf)
-		opt := StaticOptions{Expires: func() string { return "46" }}
-		m.Use(Static(currentRoot, opt))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Header().Get("Expires"), ShouldEqual, "46")
-	})
-}
-
-func Test_Static_Redirect(t *testing.T) {
-	Convey("Serve static files with redirect", t, func() {
-		m := New()
-		m.Use(Static(currentRoot, StaticOptions{Prefix: "/public"}))
-
-		resp := httptest.NewRecorder()
-		req, err := http.NewRequest("GET", "http://localhost:4000/public", nil)
-		So(err, ShouldBeNil)
-		m.ServeHTTP(resp, req)
-
-		So(resp.Code, ShouldEqual, http.StatusFound)
-		So(resp.Header().Get("Location"), ShouldEqual, "/public/")
-	})
-}
-
-func Test_Statics(t *testing.T) {
-	Convey("Serve multiple static routers", t, func() {
-		Convey("Register empty directory", func() {
-			defer func() {
-				So(recover(), ShouldNotBeNil)
-			}()
-
-			m := New()
-			m.Use(Statics(StaticOptions{}))
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "http://localhost:4000/", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-		})
-
-		Convey("Serve normally", func() {
-			var buf bytes.Buffer
-			m := NewWithLogger(&buf)
-			m.Use(Statics(StaticOptions{}, currentRoot, currentRoot+"/inject"))
-
-			resp := httptest.NewRecorder()
-			req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-
-			So(resp.Code, ShouldEqual, http.StatusOK)
-			So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n")
-
-			resp = httptest.NewRecorder()
-			req, err = http.NewRequest("GET", "http://localhost:4000/inject/inject.go", nil)
-			So(err, ShouldBeNil)
-			m.ServeHTTP(resp, req)
-
-			So(resp.Code, ShouldEqual, http.StatusOK)
-			So(buf.String(), ShouldEndWith, "[Macaron] [Static] Serving /inject/inject.go\n")
-		})
-	})
-}

+ 0 - 421
Godeps/_workspace/src/github.com/Unknwon/macaron/tree.go

@@ -1,421 +0,0 @@
-// Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-// NOTE: last sync 0c93364 on Dec 19, 2014.
-
-import (
-	"path"
-	"regexp"
-	"strings"
-
-	"github.com/Unknwon/com"
-)
-
-type leafInfo struct {
-	// Names of wildcards that lead to this leaf.
-	// eg, ["id" "name"] for the wildcard ":id" and ":name".
-	wildcards []string
-	// Not nil if the leaf is regexp.
-	regexps *regexp.Regexp
-	handle  Handle
-}
-
-func (leaf *leafInfo) match(wildcardValues []string) (ok bool, params Params) {
-	if leaf.regexps == nil {
-		if len(wildcardValues) == 0 && len(leaf.wildcards) > 0 {
-			if com.IsSliceContainsStr(leaf.wildcards, ":") {
-				params = make(map[string]string)
-				j := 0
-				for _, v := range leaf.wildcards {
-					if v == ":" {
-						continue
-					}
-					params[v] = ""
-					j += 1
-				}
-				return true, params
-			}
-			return false, nil
-		} else if len(wildcardValues) == 0 {
-			return true, nil // Static path.
-		}
-
-		// Match *
-		if len(leaf.wildcards) == 1 && leaf.wildcards[0] == ":splat" {
-			params = make(map[string]string)
-			params[":splat"] = path.Join(wildcardValues...)
-			return true, params
-		}
-
-		// Match *.*
-		if len(leaf.wildcards) == 3 && leaf.wildcards[0] == "." {
-			params = make(map[string]string)
-			lastone := wildcardValues[len(wildcardValues)-1]
-			strs := strings.SplitN(lastone, ".", 2)
-			if len(strs) == 2 {
-				params[":ext"] = strs[1]
-			} else {
-				params[":ext"] = ""
-			}
-			params[":path"] = path.Join(wildcardValues[:len(wildcardValues)-1]...) + "/" + strs[0]
-			return true, params
-		}
-
-		// Match :id
-		params = make(map[string]string)
-		j := 0
-		for _, v := range leaf.wildcards {
-			if v == ":" {
-				continue
-			}
-			if v == "." {
-				lastone := wildcardValues[len(wildcardValues)-1]
-				strs := strings.SplitN(lastone, ".", 2)
-				if len(strs) == 2 {
-					params[":ext"] = strs[1]
-				} else {
-					params[":ext"] = ""
-				}
-				if len(wildcardValues[j:]) == 1 {
-					params[":path"] = strs[0]
-				} else {
-					params[":path"] = path.Join(wildcardValues[j:]...) + "/" + strs[0]
-				}
-				return true, params
-			}
-			if len(wildcardValues) <= j {
-				return false, nil
-			}
-			params[v] = wildcardValues[j]
-			j++
-		}
-		if len(params) != len(wildcardValues) {
-			return false, nil
-		}
-		return true, params
-	}
-
-	if !leaf.regexps.MatchString(path.Join(wildcardValues...)) {
-		return false, nil
-	}
-	params = make(map[string]string)
-	matches := leaf.regexps.FindStringSubmatch(path.Join(wildcardValues...))
-	for i, match := range matches[1:] {
-		params[leaf.wildcards[i]] = match
-	}
-	return true, params
-}
-
-// Tree represents a router tree for Macaron instance.
-type Tree struct {
-	fixroutes map[string]*Tree
-	wildcard  *Tree
-	leaves    []*leafInfo
-}
-
-// NewTree initializes and returns a router tree.
-func NewTree() *Tree {
-	return &Tree{
-		fixroutes: make(map[string]*Tree),
-	}
-}
-
-// splitPath splites patthen into parts.
-//
-// Examples:
-//		"/" -> []
-// 		"/admin" -> ["admin"]
-// 		"/admin/" -> ["admin"]
-// 		"/admin/users" -> ["admin", "users"]
-func splitPath(pattern string) []string {
-	if len(pattern) == 0 {
-		return []string{}
-	}
-
-	elements := strings.Split(pattern, "/")
-	if elements[0] == "" {
-		elements = elements[1:]
-	}
-	if elements[len(elements)-1] == "" {
-		elements = elements[:len(elements)-1]
-	}
-	return elements
-}
-
-// AddRouter adds a new route to router tree.
-func (t *Tree) AddRouter(pattern string, handle Handle) {
-	t.addSegments(splitPath(pattern), handle, nil, "")
-}
-
-// splitSegment splits segment into parts.
-//
-// Examples:
-// 		"admin" -> false, nil, ""
-// 		":id" -> true, [:id], ""
-// 		"?:id" -> true, [: :id], ""        : meaning can empty
-// 		":id:int" -> true, [:id], ([0-9]+)
-// 		":name:string" -> true, [:name], ([\w]+)
-// 		":id([0-9]+)" -> true, [:id], ([0-9]+)
-// 		":id([0-9]+)_:name" -> true, [:id :name], ([0-9]+)_(.+)
-// 		"cms_:id_:page.html" -> true, [:id :page], cms_(.+)_(.+).html
-// 		"*" -> true, [:splat], ""
-// 		"*.*" -> true,[. :path :ext], ""      . meaning separator
-func splitSegment(key string) (bool, []string, string) {
-	if strings.HasPrefix(key, "*") {
-		if key == "*.*" {
-			return true, []string{".", ":path", ":ext"}, ""
-		} else {
-			return true, []string{":splat"}, ""
-		}
-	}
-	if strings.ContainsAny(key, ":") {
-		var paramsNum int
-		var out []rune
-		var start bool
-		var startexp bool
-		var param []rune
-		var expt []rune
-		var skipnum int
-		params := []string{}
-		reg := regexp.MustCompile(`[a-zA-Z0-9]+`)
-		for i, v := range key {
-			if skipnum > 0 {
-				skipnum -= 1
-				continue
-			}
-			if start {
-				//:id:int and :name:string
-				if v == ':' {
-					if len(key) >= i+4 {
-						if key[i+1:i+4] == "int" {
-							out = append(out, []rune("([0-9]+)")...)
-							params = append(params, ":"+string(param))
-							start = false
-							startexp = false
-							skipnum = 3
-							param = make([]rune, 0)
-							paramsNum += 1
-							continue
-						}
-					}
-					if len(key) >= i+7 {
-						if key[i+1:i+7] == "string" {
-							out = append(out, []rune(`([\w]+)`)...)
-							params = append(params, ":"+string(param))
-							paramsNum += 1
-							start = false
-							startexp = false
-							skipnum = 6
-							param = make([]rune, 0)
-							continue
-						}
-					}
-				}
-				// params only support a-zA-Z0-9
-				if reg.MatchString(string(v)) {
-					param = append(param, v)
-					continue
-				}
-				if v != '(' {
-					out = append(out, []rune(`(.+)`)...)
-					params = append(params, ":"+string(param))
-					param = make([]rune, 0)
-					paramsNum += 1
-					start = false
-					startexp = false
-				}
-			}
-			if startexp {
-				if v != ')' {
-					expt = append(expt, v)
-					continue
-				}
-			}
-			if v == ':' {
-				param = make([]rune, 0)
-				start = true
-			} else if v == '(' {
-				startexp = true
-				start = false
-				params = append(params, ":"+string(param))
-				paramsNum += 1
-				expt = make([]rune, 0)
-				expt = append(expt, '(')
-			} else if v == ')' {
-				startexp = false
-				expt = append(expt, ')')
-				out = append(out, expt...)
-				param = make([]rune, 0)
-			} else if v == '?' {
-				params = append(params, ":")
-			} else {
-				out = append(out, v)
-			}
-		}
-		if len(param) > 0 {
-			if paramsNum > 0 {
-				out = append(out, []rune(`(.+)`)...)
-			}
-			params = append(params, ":"+string(param))
-		}
-		return true, params, string(out)
-	} else {
-		return false, nil, ""
-	}
-}
-
-// addSegments add segments to the router tree.
-func (t *Tree) addSegments(segments []string, handle Handle, wildcards []string, reg string) {
-	// Fixed root route.
-	if len(segments) == 0 {
-		if reg != "" {
-			filterCards := make([]string, 0, len(wildcards))
-			for _, v := range wildcards {
-				if v == ":" || v == "." {
-					continue
-				}
-				filterCards = append(filterCards, v)
-			}
-			t.leaves = append(t.leaves, &leafInfo{
-				handle:    handle,
-				wildcards: filterCards,
-				regexps:   regexp.MustCompile("^" + reg + "$"),
-			})
-		} else {
-			t.leaves = append(t.leaves, &leafInfo{
-				handle:    handle,
-				wildcards: wildcards,
-			})
-		}
-		return
-	}
-
-	seg := segments[0]
-	iswild, params, regexpStr := splitSegment(seg)
-	//for the router  /login/*/access match /login/2009/11/access
-	if !iswild && com.IsSliceContainsStr(wildcards, ":splat") {
-		iswild = true
-		regexpStr = seg
-	}
-	if seg == "*" && len(wildcards) > 0 && reg == "" {
-		iswild = true
-		regexpStr = "(.+)"
-	}
-	if iswild {
-		if t.wildcard == nil {
-			t.wildcard = NewTree()
-		}
-		if regexpStr != "" {
-			if reg == "" {
-				rr := ""
-				for _, w := range wildcards {
-					if w == "." || w == ":" {
-						continue
-					}
-					if w == ":splat" {
-						rr = rr + "(.+)/"
-					} else {
-						rr = rr + "([^/]+)/"
-					}
-				}
-				regexpStr = rr + regexpStr
-			} else {
-				regexpStr = "/" + regexpStr
-			}
-		} else if reg != "" {
-			if seg == "*.*" {
-				regexpStr = "/([^.]+).(.+)"
-			} else {
-				for _, w := range params {
-					if w == "." || w == ":" {
-						continue
-					}
-					regexpStr = "/([^/]+)" + regexpStr
-				}
-			}
-		}
-		t.wildcard.addSegments(segments[1:], handle, append(wildcards, params...), reg+regexpStr)
-	} else {
-		subTree, ok := t.fixroutes[seg]
-		if !ok {
-			subTree = NewTree()
-			t.fixroutes[seg] = subTree
-		}
-		subTree.addSegments(segments[1:], handle, wildcards, reg)
-	}
-}
-
-func (t *Tree) match(segments []string, wildcardValues []string) (handle Handle, params Params) {
-	// Handle leaf nodes.
-	if len(segments) == 0 {
-		for _, l := range t.leaves {
-			if ok, pa := l.match(wildcardValues); ok {
-				return l.handle, pa
-			}
-		}
-		if t.wildcard != nil {
-			for _, l := range t.wildcard.leaves {
-				if ok, pa := l.match(wildcardValues); ok {
-					return l.handle, pa
-				}
-			}
-
-		}
-		return nil, nil
-	}
-
-	seg, segs := segments[0], segments[1:]
-
-	subTree, ok := t.fixroutes[seg]
-	if ok {
-		handle, params = subTree.match(segs, wildcardValues)
-	} else if len(segs) == 0 { //.json .xml
-		if subindex := strings.LastIndex(seg, "."); subindex != -1 {
-			subTree, ok = t.fixroutes[seg[:subindex]]
-			if ok {
-				handle, params = subTree.match(segs, wildcardValues)
-				if handle != nil {
-					if params == nil {
-						params = make(map[string]string)
-					}
-					params[":ext"] = seg[subindex+1:]
-					return handle, params
-				}
-			}
-		}
-	}
-	if handle == nil && t.wildcard != nil {
-		handle, params = t.wildcard.match(segs, append(wildcardValues, seg))
-	}
-	if handle == nil {
-		for _, l := range t.leaves {
-			if ok, pa := l.match(append(wildcardValues, segments...)); ok {
-				return l.handle, pa
-			}
-		}
-	}
-	return handle, params
-}
-
-// Match returns Handle and params if any route is matched.
-func (t *Tree) Match(pattern string) (Handle, Params) {
-	if len(pattern) == 0 || pattern[0] != '/' {
-		return nil, nil
-	}
-
-	return t.match(splitPath(pattern), nil)
-}

+ 0 - 112
Godeps/_workspace/src/github.com/Unknwon/macaron/tree_test.go

@@ -1,112 +0,0 @@
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package macaron
-
-import (
-	// "net/http"
-	"strings"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_splitSegment(t *testing.T) {
-	type result struct {
-		Ok    bool
-		Parts []string
-		Regex string
-	}
-	cases := map[string]result{
-		"admin":              result{false, nil, ""},
-		":id":                result{true, []string{":id"}, ""},
-		"?:id":               result{true, []string{":", ":id"}, ""},
-		":id:int":            result{true, []string{":id"}, "([0-9]+)"},
-		":name:string":       result{true, []string{":name"}, `([\w]+)`},
-		":id([0-9]+)":        result{true, []string{":id"}, "([0-9]+)"},
-		":id([0-9]+)_:name":  result{true, []string{":id", ":name"}, "([0-9]+)_(.+)"},
-		"cms_:id_:page.html": result{true, []string{":id", ":page"}, "cms_(.+)_(.+).html"},
-		"*":                  result{true, []string{":splat"}, ""},
-		"*.*":                result{true, []string{".", ":path", ":ext"}, ""},
-	}
-	Convey("Splits segment into parts", t, func() {
-		for key, result := range cases {
-			ok, parts, regex := splitSegment(key)
-			So(ok, ShouldEqual, result.Ok)
-			if result.Parts == nil {
-				So(parts, ShouldBeNil)
-			} else {
-				So(parts, ShouldNotBeNil)
-				So(strings.Join(parts, " "), ShouldEqual, strings.Join(result.Parts, " "))
-			}
-			So(regex, ShouldEqual, result.Regex)
-		}
-	})
-}
-
-func Test_Tree_Match(t *testing.T) {
-	type result struct {
-		pattern string
-		reqUrl  string
-		params  map[string]string
-	}
-
-	cases := []result{
-		{"/:id", "/123", map[string]string{":id": "123"}},
-		{"/hello/?:id", "/hello", map[string]string{":id": ""}},
-		{"/", "/", nil},
-		{"", "", nil},
-		{"/customer/login", "/customer/login", nil},
-		{"/customer/login", "/customer/login.json", map[string]string{":ext": "json"}},
-		{"/*", "/customer/123", map[string]string{":splat": "customer/123"}},
-		{"/*", "/customer/2009/12/11", map[string]string{":splat": "customer/2009/12/11"}},
-		{"/aa/*/bb", "/aa/2009/bb", map[string]string{":splat": "2009"}},
-		{"/cc/*/dd", "/cc/2009/11/dd", map[string]string{":splat": "2009/11"}},
-		{"/ee/:year/*/ff", "/ee/2009/11/ff", map[string]string{":year": "2009", ":splat": "11"}},
-		{"/thumbnail/:size/uploads/*", "/thumbnail/100x100/uploads/items/2014/04/20/dPRCdChkUd651t1Hvs18.jpg",
-			map[string]string{":size": "100x100", ":splat": "items/2014/04/20/dPRCdChkUd651t1Hvs18.jpg"}},
-		{"/*.*", "/nice/api.json", map[string]string{":path": "nice/api", ":ext": "json"}},
-		{"/:name/*.*", "/nice/api.json", map[string]string{":name": "nice", ":path": "api", ":ext": "json"}},
-		{"/:name/test/*.*", "/nice/test/api.json", map[string]string{":name": "nice", ":path": "api", ":ext": "json"}},
-		{"/dl/:width:int/:height:int/*.*", "/dl/48/48/05ac66d9bda00a3acf948c43e306fc9a.jpg",
-			map[string]string{":width": "48", ":height": "48", ":ext": "jpg", ":path": "05ac66d9bda00a3acf948c43e306fc9a"}},
-		{"/v1/shop/:id:int", "/v1/shop/123", map[string]string{":id": "123"}},
-		{"/:year:int/:month:int/:id/:endid", "/1111/111/aaa/aaa", map[string]string{":year": "1111", ":month": "111", ":id": "aaa", ":endid": "aaa"}},
-		{"/v1/shop/:id/:name", "/v1/shop/123/nike", map[string]string{":id": "123", ":name": "nike"}},
-		{"/v1/shop/:id/account", "/v1/shop/123/account", map[string]string{":id": "123"}},
-		{"/v1/shop/:name:string", "/v1/shop/nike", map[string]string{":name": "nike"}},
-		{"/v1/shop/:id([0-9]+)", "/v1/shop//123", map[string]string{":id": "123"}},
-		{"/v1/shop/:id([0-9]+)_:name", "/v1/shop/123_nike", map[string]string{":id": "123", ":name": "nike"}},
-		{"/v1/shop/:id(.+)_cms.html", "/v1/shop/123_cms.html", map[string]string{":id": "123"}},
-		{"/v1/shop/cms_:id(.+)_:page(.+).html", "/v1/shop/cms_123_1.html", map[string]string{":id": "123", ":page": "1"}},
-		{"/v1/:v/cms/aaa_:id(.+)_:page(.+).html", "/v1/2/cms/aaa_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}},
-		{"/v1/:v/cms_:id(.+)_:page(.+).html", "/v1/2/cms_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}},
-		{"/v1/:v(.+)_cms/ttt_:id(.+)_:page(.+).html", "/v1/2_cms/ttt_123_1.html", map[string]string{":v": "2", ":id": "123", ":page": "1"}},
-	}
-
-	Convey("Match routers in tree", t, func() {
-		for _, c := range cases {
-			t := NewTree()
-			t.AddRouter(c.pattern, nil)
-			_, params := t.Match(c.reqUrl)
-			if params != nil {
-				for k, v := range c.params {
-					vv, ok := params[k]
-					So(ok, ShouldBeTrue)
-					So(vv, ShouldEqual, v)
-				}
-			}
-		}
-	})
-}

+ 14 - 0
Godeps/_workspace/src/github.com/go-macaron/binding/.travis.yml

@@ -0,0 +1,14 @@
+sudo: false
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+script: go test -v -cover -race
+
+notifications:
+  email:
+    - u@gogs.io

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/binding/LICENSE → Godeps/_workspace/src/github.com/go-macaron/binding/LICENSE


+ 20 - 0
Godeps/_workspace/src/github.com/go-macaron/binding/README.md

@@ -0,0 +1,20 @@
+# binding [![Build Status](https://travis-ci.org/go-macaron/binding.svg?branch=master)](https://travis-ci.org/go-macaron/binding) [![](http://gocover.io/_badge/github.com/go-macaron/binding)](http://gocover.io/github.com/go-macaron/binding)
+
+Middleware binding provides request data binding and validation for [Macaron](https://github.com/go-macaron/macaron).
+
+### Installation
+
+	go get github.com/go-macaron/binding
+	
+## Getting Help
+
+- [API Reference](https://gowalker.org/github.com/go-macaron/binding)
+- [Documentation](http://go-macaron.com/docs/middlewares/binding)
+
+## Credits
+
+This package is a modified version of [martini-contrib/binding](https://github.com/martini-contrib/binding).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.

+ 181 - 136
Godeps/_workspace/src/github.com/macaron-contrib/binding/binding.go → Godeps/_workspace/src/github.com/go-macaron/binding/binding.go

@@ -1,5 +1,5 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 Martini Authors
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -29,12 +29,10 @@ import (
 	"unicode/utf8"
 
 	"github.com/Unknwon/com"
-	"github.com/Unknwon/macaron"
+	"gopkg.in/macaron.v1"
 )
 
-// NOTE: last sync 1928ed2 on Aug 26, 2014.
-
-const _VERSION = "0.0.4"
+const _VERSION = "0.2.0"
 
 func Version() string {
 	return _VERSION
@@ -58,6 +56,7 @@ func bind(ctx *macaron.Context, obj interface{}, ifacePtr ...interface{}) {
 				errors.Add([]string{}, ERR_CONTENT_TYPE, "Unsupported Content-Type")
 			}
 			ctx.Map(errors)
+			ctx.Map(obj) // Map a fake struct so handler won't panic.
 		}
 	} else {
 		ctx.Invoke(Form(obj, ifacePtr...))
@@ -175,6 +174,14 @@ func MultipartForm(formStruct interface{}, ifacePtr ...interface{}) macaron.Hand
 				if parseErr != nil {
 					errors.Add([]string{}, ERR_DESERIALIZATION, parseErr.Error())
 				}
+
+				if ctx.Req.Form == nil {
+					ctx.Req.ParseForm()
+				}
+				for k, v := range form.Value {
+					ctx.Req.Form[k] = append(ctx.Req.Form[k], v...)
+				}
+
 				ctx.Req.MultipartForm = form
 			}
 		}
@@ -310,122 +317,162 @@ func validateStruct(errors Errors, obj interface{}) Errors {
 				field.Type.Elem().Kind() == reflect.Struct) {
 			errors = validateStruct(errors, fieldValue)
 		}
+		errors = validateField(errors, zero, field, fieldVal, fieldValue)
+	}
+	return errors
+}
 
-	VALIDATE_RULES:
-		for _, rule := range strings.Split(field.Tag.Get("binding"), ";") {
-			if len(rule) == 0 {
-				continue
+func validateField(errors Errors, zero interface{}, field reflect.StructField, fieldVal reflect.Value, fieldValue interface{}) Errors {
+	if fieldVal.Kind() == reflect.Slice {
+		for i := 0; i < fieldVal.Len(); i++ {
+			sliceVal := fieldVal.Index(i)
+			if sliceVal.Kind() == reflect.Ptr {
+				sliceVal = sliceVal.Elem()
 			}
 
-			switch {
-			case rule == "Required":
-				if reflect.DeepEqual(zero, fieldValue) {
-					errors.Add([]string{field.Name}, ERR_REQUIRED, "Required")
-					break VALIDATE_RULES
-				}
-			case rule == "AlphaDash":
-				if alphaDashPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
-					errors.Add([]string{field.Name}, ERR_ALPHA_DASH, "AlphaDash")
-					break VALIDATE_RULES
-				}
-			case rule == "AlphaDashDot":
-				if alphaDashDotPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
-					errors.Add([]string{field.Name}, ERR_ALPHA_DASH_DOT, "AlphaDashDot")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "MinSize("):
-				min, _ := strconv.Atoi(rule[8 : len(rule)-1])
-				if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) < min {
-					errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize")
-					break VALIDATE_RULES
-				}
-				v := reflect.ValueOf(fieldValue)
-				if v.Kind() == reflect.Slice && v.Len() < min {
-					errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "MaxSize("):
-				max, _ := strconv.Atoi(rule[8 : len(rule)-1])
-				if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) > max {
-					errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize")
-					break VALIDATE_RULES
-				}
-				v := reflect.ValueOf(fieldValue)
-				if v.Kind() == reflect.Slice && v.Len() > max {
-					errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "Range("):
-				nums := strings.Split(rule[6:len(rule)-1], ",")
-				if len(nums) != 2 {
-					break VALIDATE_RULES
-				}
-				val := com.StrTo(fmt.Sprintf("%v", fieldValue)).MustInt()
-				if val < com.StrTo(nums[0]).MustInt() || val > com.StrTo(nums[1]).MustInt() {
-					errors.Add([]string{field.Name}, ERR_RANGE, "Range")
-					break VALIDATE_RULES
-				}
-			case rule == "Email":
-				if !emailPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
-					errors.Add([]string{field.Name}, ERR_EMAIL, "Email")
-					break VALIDATE_RULES
-				}
-			case rule == "Url":
-				str := fmt.Sprintf("%v", fieldValue)
-				if len(str) == 0 {
-					continue
-				} else if !urlPattern.MatchString(str) {
-					errors.Add([]string{field.Name}, ERR_URL, "Url")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "In("):
-				if !in(fieldValue, rule[3:len(rule)-1]) {
-					errors.Add([]string{field.Name}, ERR_IN, "In")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "NotIn("):
-				if in(fieldValue, rule[6:len(rule)-1]) {
-					errors.Add([]string{field.Name}, ERR_NOT_INT, "NotIn")
-					break VALIDATE_RULES
-				}
-			case strings.HasPrefix(rule, "Include("):
-				if !strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) {
-					errors.Add([]string{field.Name}, ERR_INCLUDE, "Include")
+			sliceValue := sliceVal.Interface()
+			zero := reflect.Zero(sliceVal.Type()).Interface()
+			if sliceVal.Kind() == reflect.Struct ||
+				(sliceVal.Kind() == reflect.Ptr && !reflect.DeepEqual(zero, sliceValue) &&
+					sliceVal.Elem().Kind() == reflect.Struct) {
+				errors = validateStruct(errors, sliceValue)
+			}
+			/* Apply validation rules to each item in a slice. ISSUE #3
+			else {
+				errors = validateField(errors, zero, field, sliceVal, sliceValue)
+			}*/
+		}
+	}
+
+VALIDATE_RULES:
+	for _, rule := range strings.Split(field.Tag.Get("binding"), ";") {
+		if len(rule) == 0 {
+			continue
+		}
+
+		switch {
+		case rule == "OmitEmpty":
+			if reflect.DeepEqual(zero, fieldValue) {
+				break VALIDATE_RULES
+			}
+		case rule == "Required":
+			if reflect.DeepEqual(zero, fieldValue) {
+				errors.Add([]string{field.Name}, ERR_REQUIRED, "Required")
+				break VALIDATE_RULES
+			}
+		case rule == "AlphaDash":
+			if alphaDashPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
+				errors.Add([]string{field.Name}, ERR_ALPHA_DASH, "AlphaDash")
+				break VALIDATE_RULES
+			}
+		case rule == "AlphaDashDot":
+			if alphaDashDotPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
+				errors.Add([]string{field.Name}, ERR_ALPHA_DASH_DOT, "AlphaDashDot")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "Size("):
+			size, _ := strconv.Atoi(rule[5 : len(rule)-1])
+			if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) != size {
+				errors.Add([]string{field.Name}, ERR_SIZE, "Size")
+				break VALIDATE_RULES
+			}
+			v := reflect.ValueOf(fieldValue)
+			if v.Kind() == reflect.Slice && v.Len() != size {
+				errors.Add([]string{field.Name}, ERR_SIZE, "Size")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "MinSize("):
+			min, _ := strconv.Atoi(rule[8 : len(rule)-1])
+			if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) < min {
+				errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize")
+				break VALIDATE_RULES
+			}
+			v := reflect.ValueOf(fieldValue)
+			if v.Kind() == reflect.Slice && v.Len() < min {
+				errors.Add([]string{field.Name}, ERR_MIN_SIZE, "MinSize")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "MaxSize("):
+			max, _ := strconv.Atoi(rule[8 : len(rule)-1])
+			if str, ok := fieldValue.(string); ok && utf8.RuneCountInString(str) > max {
+				errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize")
+				break VALIDATE_RULES
+			}
+			v := reflect.ValueOf(fieldValue)
+			if v.Kind() == reflect.Slice && v.Len() > max {
+				errors.Add([]string{field.Name}, ERR_MAX_SIZE, "MaxSize")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "Range("):
+			nums := strings.Split(rule[6:len(rule)-1], ",")
+			if len(nums) != 2 {
+				break VALIDATE_RULES
+			}
+			val := com.StrTo(fmt.Sprintf("%v", fieldValue)).MustInt()
+			if val < com.StrTo(nums[0]).MustInt() || val > com.StrTo(nums[1]).MustInt() {
+				errors.Add([]string{field.Name}, ERR_RANGE, "Range")
+				break VALIDATE_RULES
+			}
+		case rule == "Email":
+			if !emailPattern.MatchString(fmt.Sprintf("%v", fieldValue)) {
+				errors.Add([]string{field.Name}, ERR_EMAIL, "Email")
+				break VALIDATE_RULES
+			}
+		case rule == "Url":
+			str := fmt.Sprintf("%v", fieldValue)
+			if len(str) == 0 {
+				continue
+			} else if !urlPattern.MatchString(str) {
+				errors.Add([]string{field.Name}, ERR_URL, "Url")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "In("):
+			if !in(fieldValue, rule[3:len(rule)-1]) {
+				errors.Add([]string{field.Name}, ERR_IN, "In")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "NotIn("):
+			if in(fieldValue, rule[6:len(rule)-1]) {
+				errors.Add([]string{field.Name}, ERR_NOT_INT, "NotIn")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "Include("):
+			if !strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) {
+				errors.Add([]string{field.Name}, ERR_INCLUDE, "Include")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "Exclude("):
+			if strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) {
+				errors.Add([]string{field.Name}, ERR_EXCLUDE, "Exclude")
+				break VALIDATE_RULES
+			}
+		case strings.HasPrefix(rule, "Default("):
+			if reflect.DeepEqual(zero, fieldValue) {
+				if fieldVal.CanAddr() {
+					setWithProperType(field.Type.Kind(), rule[8:len(rule)-1], fieldVal, field.Tag.Get("form"), errors)
+				} else {
+					errors.Add([]string{field.Name}, ERR_EXCLUDE, "Default")
 					break VALIDATE_RULES
 				}
-			case strings.HasPrefix(rule, "Exclude("):
-				if strings.Contains(fmt.Sprintf("%v", fieldValue), rule[8:len(rule)-1]) {
-					errors.Add([]string{field.Name}, ERR_EXCLUDE, "Exclude")
+			}
+		default:
+			// Apply custom validation rules.
+			for i := range ruleMapper {
+				if ruleMapper[i].IsMatch(rule) && !ruleMapper[i].IsValid(errors, field.Name, fieldValue) {
 					break VALIDATE_RULES
 				}
-			case strings.HasPrefix(rule, "Default("):
-				if reflect.DeepEqual(zero, fieldValue) {
-					if fieldVal.CanAddr() {
-						setWithProperType(field.Type.Kind(), rule[8:len(rule)-1], fieldVal, field.Tag.Get("form"), errors)
-					} else {
-						errors.Add([]string{field.Name}, ERR_EXCLUDE, "Default")
-						break VALIDATE_RULES
-					}
-				}
-			default:
-				// Apply custom validation rules.
-				for i := range ruleMapper {
-					if ruleMapper[i].IsMatch(rule) && !ruleMapper[i].IsValid(errors, field.Name, fieldValue) {
-						break VALIDATE_RULES
-					}
-				}
 			}
 		}
 	}
 	return errors
 }
 
-// NameMapper represents a form/json tag name mapper.
+// NameMapper represents a form tag name mapper.
 type NameMapper func(string) string
 
 var (
 	nameMapper = func(field string) string {
-		newstr := make([]rune, 0, 10)
+		newstr := make([]rune, 0, len(field))
 		for i, chr := range field {
 			if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
 				if i > 0 {
@@ -468,42 +515,40 @@ func mapForm(formStruct reflect.Value, form map[string][]string,
 		}
 
 		inputFieldName := parseFormName(typeField.Name, typeField.Tag.Get("form"))
-		if len(inputFieldName) > 0 {
-			if !structField.CanSet() {
-				continue
-			}
-
-			inputValue, exists := form[inputFieldName]
-			if exists {
-				numElems := len(inputValue)
-				if structField.Kind() == reflect.Slice && numElems > 0 {
-					sliceOf := structField.Type().Elem().Kind()
-					slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
-					for i := 0; i < numElems; i++ {
-						setWithProperType(sliceOf, inputValue[i], slice.Index(i), inputFieldName, errors)
-					}
-					formStruct.Field(i).Set(slice)
-				} else {
-					setWithProperType(typeField.Type.Kind(), inputValue[0], structField, inputFieldName, errors)
-				}
-				continue
-			}
+		if len(inputFieldName) == 0 || !structField.CanSet() {
+			continue
+		}
 
-			inputFile, exists := formfile[inputFieldName]
-			if !exists {
-				continue
-			}
-			fhType := reflect.TypeOf((*multipart.FileHeader)(nil))
-			numElems := len(inputFile)
-			if structField.Kind() == reflect.Slice && numElems > 0 && structField.Type().Elem() == fhType {
+		inputValue, exists := form[inputFieldName]
+		if exists {
+			numElems := len(inputValue)
+			if structField.Kind() == reflect.Slice && numElems > 0 {
+				sliceOf := structField.Type().Elem().Kind()
 				slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
 				for i := 0; i < numElems; i++ {
-					slice.Index(i).Set(reflect.ValueOf(inputFile[i]))
+					setWithProperType(sliceOf, inputValue[i], slice.Index(i), inputFieldName, errors)
 				}
-				structField.Set(slice)
-			} else if structField.Type() == fhType {
-				structField.Set(reflect.ValueOf(inputFile[0]))
+				formStruct.Field(i).Set(slice)
+			} else {
+				setWithProperType(typeField.Type.Kind(), inputValue[0], structField, inputFieldName, errors)
+			}
+			continue
+		}
+
+		inputFile, exists := formfile[inputFieldName]
+		if !exists {
+			continue
+		}
+		fhType := reflect.TypeOf((*multipart.FileHeader)(nil))
+		numElems := len(inputFile)
+		if structField.Kind() == reflect.Slice && numElems > 0 && structField.Type().Elem() == fhType {
+			slice := reflect.MakeSlice(structField.Type(), numElems, numElems)
+			for i := 0; i < numElems; i++ {
+				slice.Index(i).Set(reflect.ValueOf(inputFile[i]))
 			}
+			structField.Set(slice)
+		} else if structField.Type() == fhType {
+			structField.Set(reflect.ValueOf(inputFile[0]))
 		}
 	}
 }

+ 3 - 2
Godeps/_workspace/src/github.com/macaron-contrib/binding/errors.go → Godeps/_workspace/src/github.com/go-macaron/binding/errors.go

@@ -1,5 +1,5 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 Martini Authors
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -27,6 +27,7 @@ const (
 	ERR_REQUIRED       = "RequiredError"
 	ERR_ALPHA_DASH     = "AlphaDashError"
 	ERR_ALPHA_DASH_DOT = "AlphaDashDotError"
+	ERR_SIZE           = "SizeError"
 	ERR_MIN_SIZE       = "MinSizeError"
 	ERR_MAX_SIZE       = "MaxSizeError"
 	ERR_RANGE          = "RangeError"

+ 14 - 0
Godeps/_workspace/src/github.com/go-macaron/gzip/.travis.yml

@@ -0,0 +1,14 @@
+sudo: false
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+script: go test -v -cover -race
+
+notifications:
+  email:
+    - u@gogs.io

+ 0 - 0
Godeps/_workspace/src/github.com/Unknwon/macaron/LICENSE → Godeps/_workspace/src/github.com/go-macaron/gzip/LICENSE


+ 20 - 0
Godeps/_workspace/src/github.com/go-macaron/gzip/README.md

@@ -0,0 +1,20 @@
+# gzip [![Build Status](https://travis-ci.org/go-macaron/gzip.svg?branch=master)](https://travis-ci.org/go-macaron/gzip) [![](http://gocover.io/_badge/github.com/go-macaron/gzip)](http://gocover.io/github.com/go-macaron/gzip)
+
+Middleware gzip provides compress to responses for [Macaron](https://github.com/go-macaron/macaron).
+
+### Installation
+
+	go get github.com/go-macaron/gzip
+
+## Getting Help
+
+- [API Reference](https://gowalker.org/github.com/go-macaron/gzip)
+- [Documentation](http://go-macaron.com/docs/middlewares/gzip)
+
+## Credits
+
+This package is a modified version of [martini-contrib/gzip](https://github.com/martini-contrib/gzip).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.

+ 118 - 0
Godeps/_workspace/src/github.com/go-macaron/gzip/gzip.go

@@ -0,0 +1,118 @@
+// Copyright 2013 Martini Authors
+// Copyright 2015 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package gzip
+
+import (
+	"bufio"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+
+	"github.com/klauspost/compress/gzip"
+	"gopkg.in/macaron.v1"
+)
+
+const (
+	_HEADER_ACCEPT_ENCODING  = "Accept-Encoding"
+	_HEADER_CONTENT_ENCODING = "Content-Encoding"
+	_HEADER_CONTENT_LENGTH   = "Content-Length"
+	_HEADER_CONTENT_TYPE     = "Content-Type"
+	_HEADER_VARY             = "Vary"
+)
+
+// Options represents a struct for specifying configuration options for the GZip middleware.
+type Options struct {
+	// Compression level. Can be DefaultCompression(-1), ConstantCompression(-2)
+	// or any integer value between BestSpeed(1) and BestCompression(9) inclusive.
+	CompressionLevel int
+}
+
+func isCompressionLevelValid(level int) bool {
+	return level == gzip.DefaultCompression ||
+		level == gzip.ConstantCompression ||
+		(level >= gzip.BestSpeed && level <= gzip.BestCompression)
+}
+
+func prepareOptions(options []Options) Options {
+	var opt Options
+	if len(options) > 0 {
+		opt = options[0]
+	}
+
+	if !isCompressionLevelValid(opt.CompressionLevel) {
+		// For web content, level 4 seems to be a sweet spot.
+		opt.CompressionLevel = 4
+	}
+	return opt
+}
+
+// Gziper returns a Handler that adds gzip compression to all requests.
+// Make sure to include the Gzip middleware above other middleware
+// that alter the response body (like the render middleware).
+func Gziper(options ...Options) macaron.Handler {
+	opt := prepareOptions(options)
+
+	return func(ctx *macaron.Context) {
+		if !strings.Contains(ctx.Req.Header.Get(_HEADER_ACCEPT_ENCODING), "gzip") {
+			return
+		}
+
+		headers := ctx.Resp.Header()
+		headers.Set(_HEADER_CONTENT_ENCODING, "gzip")
+		headers.Set(_HEADER_VARY, _HEADER_ACCEPT_ENCODING)
+
+		// We've made sure compression level is valid in prepareGzipOptions,
+		// no need to check same error again.
+		gz, err := gzip.NewWriterLevel(ctx.Resp, opt.CompressionLevel)
+		if err != nil {
+			panic(err.Error())
+		}
+		defer gz.Close()
+
+		gzw := gzipResponseWriter{gz, ctx.Resp}
+		ctx.Resp = gzw
+		ctx.MapTo(gzw, (*http.ResponseWriter)(nil))
+		if ctx.Render != nil {
+			ctx.Render.SetResponseWriter(gzw)
+		}
+
+		ctx.Next()
+
+		// delete content length after we know we have been written to
+		gzw.Header().Del("Content-Length")
+	}
+}
+
+type gzipResponseWriter struct {
+	w *gzip.Writer
+	macaron.ResponseWriter
+}
+
+func (grw gzipResponseWriter) Write(p []byte) (int, error) {
+	if len(grw.Header().Get(_HEADER_CONTENT_TYPE)) == 0 {
+		grw.Header().Set(_HEADER_CONTENT_TYPE, http.DetectContentType(p))
+	}
+	return grw.w.Write(p)
+}
+
+func (grw gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	hijacker, ok := grw.ResponseWriter.(http.Hijacker)
+	if !ok {
+		return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface")
+	}
+	return hijacker.Hijack()
+}

+ 14 - 0
Godeps/_workspace/src/github.com/go-macaron/inject/.travis.yml

@@ -0,0 +1,14 @@
+sudo: false
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+script: go test -v -cover -race
+
+notifications:
+  email:
+    - u@gogs.io

+ 191 - 0
Godeps/_workspace/src/github.com/go-macaron/inject/LICENSE

@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 11 - 0
Godeps/_workspace/src/github.com/go-macaron/inject/README.md

@@ -0,0 +1,11 @@
+# inject [![Build Status](https://travis-ci.org/go-macaron/inject.svg?branch=master)](https://travis-ci.org/go-macaron/inject) [![](http://gocover.io/_badge/github.com/go-macaron/inject)](http://gocover.io/github.com/go-macaron/inject)
+
+Package inject provides utilities for mapping and injecting dependencies in various ways.
+
+**This is a modified version of [codegangsta/inject](https://github.com/codegangsta/inject) for the special purposes of Macaron**
+
+**Please use the original version if you need dependency injection feature**
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.

+ 15 - 0
Godeps/_workspace/src/github.com/Unknwon/macaron/inject/inject.go → Godeps/_workspace/src/github.com/go-macaron/inject/inject.go

@@ -1,3 +1,18 @@
+// Copyright 2013 Jeremy Saenz
+// Copyright 2015 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
 // Package inject provides utilities for mapping and injecting dependencies in various ways.
 package inject
 

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/.gitignore → Godeps/_workspace/src/github.com/go-macaron/session/.gitignore


+ 14 - 0
Godeps/_workspace/src/github.com/go-macaron/session/.travis.yml

@@ -0,0 +1,14 @@
+sudo: false
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+script: go test -v -cover -race
+
+notifications:
+  email:
+    - u@gogs.io

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/LICENSE → Godeps/_workspace/src/github.com/go-macaron/session/LICENSE


+ 20 - 0
Godeps/_workspace/src/github.com/go-macaron/session/README.md

@@ -0,0 +1,20 @@
+# session [![Build Status](https://travis-ci.org/go-macaron/session.svg?branch=master)](https://travis-ci.org/go-macaron/session) [![](http://gocover.io/_badge/github.com/go-macaron/session)](http://gocover.io/github.com/go-macaron/session)
+
+Middleware session provides session management for [Macaron](https://github.com/go-macaron/macaron). It can use many session providers, including memory, file, Redis, Memcache, PostgreSQL, MySQL, Couchbase, Ledis and Nodb.
+
+### Installation
+
+	go get github.com/go-macaron/session
+	
+## Getting Help
+
+- [API Reference](https://gowalker.org/github.com/go-macaron/session)
+- [Documentation](http://go-macaron.com/docs/middlewares/session)
+
+## Credits
+
+This package is a modified version of [beego/session](https://github.com/astaxie/beego/tree/master/session).
+
+## License
+
+This project is under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for the full license text.

+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/couchbase/couchbase.go → Godeps/_workspace/src/github.com/go-macaron/session/couchbase/couchbase.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -21,7 +21,7 @@ import (
 
 	"github.com/couchbaselabs/go-couchbase"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // CouchbaseSessionStore represents a couchbase session store implementation.

+ 19 - 1
Godeps/_workspace/src/github.com/macaron-contrib/session/file.go → Godeps/_workspace/src/github.com/go-macaron/session/file.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -78,6 +78,9 @@ func (s *FileStore) ID() string {
 
 // Release releases resource and save data to provider.
 func (s *FileStore) Release() error {
+	s.p.lock.Lock()
+	defer s.p.lock.Unlock()
+
 	data, err := EncodeGob(s.data)
 	if err != nil {
 		return err
@@ -97,14 +100,17 @@ func (s *FileStore) Flush() error {
 
 // FileProvider represents a file session provider implementation.
 type FileProvider struct {
+	lock        sync.RWMutex
 	maxlifetime int64
 	rootPath    string
 }
 
 // Init initializes file session provider with given root path.
 func (p *FileProvider) Init(maxlifetime int64, rootPath string) error {
+	p.lock.Lock()
 	p.maxlifetime = maxlifetime
 	p.rootPath = rootPath
+	p.lock.Unlock()
 	return nil
 }
 
@@ -118,6 +124,8 @@ func (p *FileProvider) Read(sid string) (_ RawStore, err error) {
 	if err = os.MkdirAll(path.Dir(filename), os.ModePerm); err != nil {
 		return nil, err
 	}
+	p.lock.RLock()
+	defer p.lock.RUnlock()
 
 	var f *os.File
 	if com.IsFile(filename) {
@@ -152,15 +160,22 @@ func (p *FileProvider) Read(sid string) (_ RawStore, err error) {
 
 // Exist returns true if session with given ID exists.
 func (p *FileProvider) Exist(sid string) bool {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
 	return com.IsFile(p.filepath(sid))
 }
 
 // Destory deletes a session by session ID.
 func (p *FileProvider) Destory(sid string) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
 	return os.Remove(p.filepath(sid))
 }
 
 func (p *FileProvider) regenerate(oldsid, sid string) (err error) {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
 	filename := p.filepath(sid)
 	if com.IsExist(filename) {
 		return fmt.Errorf("new sid '%s' already exists", sid)
@@ -219,6 +234,9 @@ func (p *FileProvider) Count() int {
 
 // GC calls GC to clean expired sessions.
 func (p *FileProvider) GC() {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
 	if !com.IsExist(p.rootPath) {
 		return
 	}

+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.go → Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -25,7 +25,7 @@ import (
 	"github.com/siddontang/ledisdb/ledis"
 	"gopkg.in/ini.v1"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // LedisStore represents a ledis session store implementation.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/ledis/ledis.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/ledis/ledis.goconvey


+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.go → Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -22,7 +22,7 @@ import (
 
 	"github.com/bradfitz/gomemcache/memcache"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // MemcacheStore represents a memcache session store implementation.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/memcache/memcache.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/memcache/memcache.goconvey


+ 217 - 212
Godeps/_workspace/src/github.com/macaron-contrib/session/memory.go → Godeps/_workspace/src/github.com/go-macaron/session/memory.go

@@ -1,212 +1,217 @@
-// Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package session
-
-import (
-	"container/list"
-	"fmt"
-	"sync"
-	"time"
-)
-
-// MemStore represents a in-memory session store implementation.
-type MemStore struct {
-	sid        string
-	lock       sync.RWMutex
-	data       map[interface{}]interface{}
-	lastAccess time.Time
-}
-
-// NewMemStore creates and returns a memory session store.
-func NewMemStore(sid string) *MemStore {
-	return &MemStore{
-		sid:        sid,
-		data:       make(map[interface{}]interface{}),
-		lastAccess: time.Now(),
-	}
-}
-
-// Set sets value to given key in session.
-func (s *MemStore) Set(key, val interface{}) error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	s.data[key] = val
-	return nil
-}
-
-// Get gets value by given key in session.
-func (s *MemStore) Get(key interface{}) interface{} {
-	s.lock.RLock()
-	defer s.lock.RUnlock()
-
-	return s.data[key]
-}
-
-// Delete deletes a key from session.
-func (s *MemStore) Delete(key interface{}) error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	delete(s.data, key)
-	return nil
-}
-
-// ID returns current session ID.
-func (s *MemStore) ID() string {
-	return s.sid
-}
-
-// Release releases resource and save data to provider.
-func (_ *MemStore) Release() error {
-	return nil
-}
-
-// Flush deletes all session data.
-func (s *MemStore) Flush() error {
-	s.lock.Lock()
-	defer s.lock.Unlock()
-
-	s.data = make(map[interface{}]interface{})
-	return nil
-}
-
-// MemProvider represents a in-memory session provider implementation.
-type MemProvider struct {
-	lock        sync.RWMutex
-	maxLifetime int64
-	data        map[string]*list.Element
-	// A priority list whose lastAccess newer gets higer priority.
-	list *list.List
-}
-
-// Init initializes memory session provider.
-func (p *MemProvider) Init(maxLifetime int64, _ string) error {
-	p.maxLifetime = maxLifetime
-	return nil
-}
-
-// update expands time of session store by given ID.
-func (p *MemProvider) update(sid string) error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if e, ok := p.data[sid]; ok {
-		e.Value.(*MemStore).lastAccess = time.Now()
-		p.list.MoveToFront(e)
-		return nil
-	}
-	return nil
-}
-
-// Read returns raw session store by session ID.
-func (p *MemProvider) Read(sid string) (_ RawStore, err error) {
-	p.lock.RLock()
-	e, ok := p.data[sid]
-	p.lock.RUnlock()
-
-	if ok {
-		if err = p.update(sid); err != nil {
-			return nil, err
-		}
-		return e.Value.(*MemStore), nil
-	}
-
-	// Create a new session.
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	s := NewMemStore(sid)
-	p.data[sid] = p.list.PushBack(s)
-	return s, nil
-}
-
-// Exist returns true if session with given ID exists.
-func (p *MemProvider) Exist(sid string) bool {
-	p.lock.RLock()
-	defer p.lock.RUnlock()
-
-	_, ok := p.data[sid]
-	return ok
-}
-
-// Destory deletes a session by session ID.
-func (p *MemProvider) Destory(sid string) error {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	e, ok := p.data[sid]
-	if !ok {
-		return nil
-	}
-
-	p.list.Remove(e)
-	delete(p.data, sid)
-	return nil
-}
-
-// Regenerate regenerates a session store from old session ID to new one.
-func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) {
-	if p.Exist(sid) {
-		return nil, fmt.Errorf("new sid '%s' already exists", sid)
-	}
-
-	s, err := p.Read(oldsid)
-	if err != nil {
-		return nil, err
-	}
-
-	if err = p.Destory(oldsid); err != nil {
-		return nil, err
-	}
-
-	s.(*MemStore).sid = sid
-	p.data[sid] = p.list.PushBack(s)
-	return s, nil
-}
-
-// Count counts and returns number of sessions.
-func (p *MemProvider) Count() int {
-	return p.list.Len()
-}
-
-// GC calls GC to clean expired sessions.
-func (p *MemProvider) GC() {
-	p.lock.RLock()
-	for {
-		// No session in the list.
-		e := p.list.Back()
-		if e == nil {
-			break
-		}
-
-		if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() {
-			p.lock.RUnlock()
-			p.lock.Lock()
-			p.list.Remove(e)
-			delete(p.data, e.Value.(*MemStore).sid)
-			p.lock.Unlock()
-			p.lock.RLock()
-		} else {
-			break
-		}
-	}
-	p.lock.RUnlock()
-}
-
-func init() {
-	Register("memory", &MemProvider{list: list.New(), data: make(map[string]*list.Element)})
-}
+// Copyright 2013 Beego Authors
+// Copyright 2014 The Macaron Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package session
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// MemStore represents a in-memory session store implementation.
+type MemStore struct {
+	sid        string
+	lock       sync.RWMutex
+	data       map[interface{}]interface{}
+	lastAccess time.Time
+}
+
+// NewMemStore creates and returns a memory session store.
+func NewMemStore(sid string) *MemStore {
+	return &MemStore{
+		sid:        sid,
+		data:       make(map[interface{}]interface{}),
+		lastAccess: time.Now(),
+	}
+}
+
+// Set sets value to given key in session.
+func (s *MemStore) Set(key, val interface{}) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.data[key] = val
+	return nil
+}
+
+// Get gets value by given key in session.
+func (s *MemStore) Get(key interface{}) interface{} {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	return s.data[key]
+}
+
+// Delete deletes a key from session.
+func (s *MemStore) Delete(key interface{}) error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	delete(s.data, key)
+	return nil
+}
+
+// ID returns current session ID.
+func (s *MemStore) ID() string {
+	return s.sid
+}
+
+// Release releases resource and save data to provider.
+func (_ *MemStore) Release() error {
+	return nil
+}
+
+// Flush deletes all session data.
+func (s *MemStore) Flush() error {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.data = make(map[interface{}]interface{})
+	return nil
+}
+
+// MemProvider represents a in-memory session provider implementation.
+type MemProvider struct {
+	lock        sync.RWMutex
+	maxLifetime int64
+	data        map[string]*list.Element
+	// A priority list in which an entry with a newer lastAccess gets higher priority.
+	list *list.List
+}
+
+// Init initializes memory session provider.
+func (p *MemProvider) Init(maxLifetime int64, _ string) error {
+	p.lock.Lock()
+	p.maxLifetime = maxLifetime
+	p.lock.Unlock()
+	return nil
+}
+
+// update expands time of session store by given ID.
+func (p *MemProvider) update(sid string) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	if e, ok := p.data[sid]; ok {
+		e.Value.(*MemStore).lastAccess = time.Now()
+		p.list.MoveToFront(e)
+		return nil
+	}
+	return nil
+}
+
+// Read returns raw session store by session ID.
+func (p *MemProvider) Read(sid string) (_ RawStore, err error) {
+	p.lock.RLock()
+	e, ok := p.data[sid]
+	p.lock.RUnlock()
+
+	if ok {
+		if err = p.update(sid); err != nil {
+			return nil, err
+		}
+		return e.Value.(*MemStore), nil
+	}
+
+	// Create a new session.
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	s := NewMemStore(sid)
+	p.data[sid] = p.list.PushBack(s)
+	return s, nil
+}
+
+// Exist returns true if session with given ID exists.
+func (p *MemProvider) Exist(sid string) bool {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	_, ok := p.data[sid]
+	return ok
+}
+
+// Destory deletes a session by session ID.
+func (p *MemProvider) Destory(sid string) error {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+
+	e, ok := p.data[sid]
+	if !ok {
+		return nil
+	}
+
+	p.list.Remove(e)
+	delete(p.data, sid)
+	return nil
+}
+
+// Regenerate regenerates a session store from old session ID to new one.
+func (p *MemProvider) Regenerate(oldsid, sid string) (RawStore, error) {
+	if p.Exist(sid) {
+		return nil, fmt.Errorf("new sid '%s' already exists", sid)
+	}
+
+	s, err := p.Read(oldsid)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = p.Destory(oldsid); err != nil {
+		return nil, err
+	}
+
+	s.(*MemStore).sid = sid
+
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	p.data[sid] = p.list.PushBack(s)
+	return s, nil
+}
+
+// Count counts and returns number of sessions.
+func (p *MemProvider) Count() int {
+	return p.list.Len()
+}
+
+// GC calls GC to clean expired sessions.
+func (p *MemProvider) GC() {
+	p.lock.RLock()
+	for {
+		// No session in the list.
+		e := p.list.Back()
+		if e == nil {
+			break
+		}
+
+		if (e.Value.(*MemStore).lastAccess.Unix() + p.maxLifetime) < time.Now().Unix() {
+			p.lock.RUnlock()
+			p.lock.Lock()
+			p.list.Remove(e)
+			delete(p.data, e.Value.(*MemStore).sid)
+			p.lock.Unlock()
+			p.lock.RLock()
+		} else {
+			break
+		}
+	}
+	p.lock.RUnlock()
+}
+
+func init() {
+	Register("memory", &MemProvider{list: list.New(), data: make(map[string]*list.Element)})
+}

+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.go → Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -24,7 +24,7 @@ import (
 
 	_ "github.com/go-sql-driver/mysql"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // MysqlStore represents a mysql session store implementation.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/mysql/mysql.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/mysql/mysql.goconvey


+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.go → Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.go

@@ -1,4 +1,4 @@
-// Copyright 2015 Unknwon
+// Copyright 2015 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -21,7 +21,7 @@ import (
 	"github.com/lunny/nodb"
 	"github.com/lunny/nodb/config"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // NodbStore represents a nodb session store implementation.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/nodb/nodb.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/nodb/nodb.goconvey


+ 2 - 2
Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.go → Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -24,7 +24,7 @@ import (
 
 	_ "github.com/lib/pq"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // PostgresStore represents a postgres session store implementation.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/postgres/postgres.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/postgres/postgres.goconvey


+ 27 - 19
Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.go → Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -25,22 +25,23 @@ import (
 	"gopkg.in/ini.v1"
 	"gopkg.in/redis.v2"
 
-	"github.com/macaron-contrib/session"
+	"github.com/go-macaron/session"
 )
 
 // RedisStore represents a redis session store implementation.
 type RedisStore struct {
-	c        *redis.Client
-	sid      string
-	duration time.Duration
-	lock     sync.RWMutex
-	data     map[interface{}]interface{}
+	c           *redis.Client
+	prefix, sid string
+	duration    time.Duration
+	lock        sync.RWMutex
+	data        map[interface{}]interface{}
 }
 
 // NewRedisStore creates and returns a redis session store.
-func NewRedisStore(c *redis.Client, sid string, dur time.Duration, kv map[interface{}]interface{}) *RedisStore {
+func NewRedisStore(c *redis.Client, prefix, sid string, dur time.Duration, kv map[interface{}]interface{}) *RedisStore {
 	return &RedisStore{
 		c:        c,
+		prefix:   prefix,
 		sid:      sid,
 		duration: dur,
 		data:     kv,
@@ -85,7 +86,7 @@ func (s *RedisStore) Release() error {
 		return err
 	}
 
-	return s.c.SetEx(s.sid, s.duration, string(data)).Err()
+	return s.c.SetEx(s.prefix+s.sid, s.duration, string(data)).Err()
 }
 
 // Flush deletes all session data.
@@ -101,10 +102,11 @@ func (s *RedisStore) Flush() error {
 type RedisProvider struct {
 	c        *redis.Client
 	duration time.Duration
+	prefix   string
 }
 
 // Init initializes redis session provider.
-// configs: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180
+// configs: network=tcp,addr=:6379,password=macaron,db=0,pool_size=100,idle_timeout=180,prefix=session;
 func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) {
 	p.duration, err = time.ParseDuration(fmt.Sprintf("%ds", maxlifetime))
 	if err != nil {
@@ -136,6 +138,8 @@ func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) {
 			if err != nil {
 				return fmt.Errorf("error parsing idle timeout: %v", err)
 			}
+		case "prefix":
+			p.prefix = v
 		default:
 			return fmt.Errorf("session/redis: unsupported option '%s'", k)
 		}
@@ -147,14 +151,15 @@ func (p *RedisProvider) Init(maxlifetime int64, configs string) (err error) {
 
 // Read returns raw session store by session ID.
 func (p *RedisProvider) Read(sid string) (session.RawStore, error) {
+	psid := p.prefix + sid
 	if !p.Exist(sid) {
-		if err := p.c.Set(sid, "").Err(); err != nil {
+		if err := p.c.Set(psid, "").Err(); err != nil {
 			return nil, err
 		}
 	}
 
 	var kv map[interface{}]interface{}
-	kvs, err := p.c.Get(sid).Result()
+	kvs, err := p.c.Get(psid).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -167,37 +172,40 @@ func (p *RedisProvider) Read(sid string) (session.RawStore, error) {
 		}
 	}
 
-	return NewRedisStore(p.c, sid, p.duration, kv), nil
+	return NewRedisStore(p.c, p.prefix, sid, p.duration, kv), nil
 }
 
 // Exist returns true if session with given ID exists.
 func (p *RedisProvider) Exist(sid string) bool {
-	has, err := p.c.Exists(sid).Result()
+	has, err := p.c.Exists(p.prefix + sid).Result()
 	return err == nil && has
 }
 
 // Destory deletes a session by session ID.
 func (p *RedisProvider) Destory(sid string) error {
-	return p.c.Del(sid).Err()
+	return p.c.Del(p.prefix + sid).Err()
 }
 
 // Regenerate regenerates a session store from old session ID to new one.
 func (p *RedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err error) {
+	poldsid := p.prefix + oldsid
+	psid := p.prefix + sid
+
 	if p.Exist(sid) {
 		return nil, fmt.Errorf("new sid '%s' already exists", sid)
 	} else if !p.Exist(oldsid) {
 		// Make a fake old session.
-		if err = p.c.SetEx(oldsid, p.duration, "").Err(); err != nil {
+		if err = p.c.SetEx(poldsid, p.duration, "").Err(); err != nil {
 			return nil, err
 		}
 	}
 
-	if err = p.c.Rename(oldsid, sid).Err(); err != nil {
+	if err = p.c.Rename(poldsid, psid).Err(); err != nil {
 		return nil, err
 	}
 
 	var kv map[interface{}]interface{}
-	kvs, err := p.c.Get(sid).Result()
+	kvs, err := p.c.Get(psid).Result()
 	if err != nil {
 		return nil, err
 	}
@@ -211,7 +219,7 @@ func (p *RedisProvider) Regenerate(oldsid, sid string) (_ session.RawStore, err
 		}
 	}
 
-	return NewRedisStore(p.c, sid, p.duration, kv), nil
+	return NewRedisStore(p.c, p.prefix, sid, p.duration, kv), nil
 }
 
 // Count counts and returns number of sessions.

+ 0 - 0
Godeps/_workspace/src/github.com/macaron-contrib/session/redis/redis.goconvey → Godeps/_workspace/src/github.com/go-macaron/session/redis/redis.goconvey


+ 3 - 5
Godeps/_workspace/src/github.com/macaron-contrib/session/session.go → Godeps/_workspace/src/github.com/go-macaron/session/session.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -16,8 +16,6 @@
 // Package session a middleware that provides the session management of Macaron.
 package session
 
-// NOTE: last sync 000033e on Nov 4, 2014.
-
 import (
 	"encoding/hex"
 	"fmt"
@@ -25,10 +23,10 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/Unknwon/macaron"
+	"gopkg.in/macaron.v1"
 )
 
-const _VERSION = "0.1.6"
+const _VERSION = "0.3.0"
 
 func Version() string {
 	return _VERSION

+ 12 - 1
Godeps/_workspace/src/github.com/macaron-contrib/session/utils.go → Godeps/_workspace/src/github.com/go-macaron/session/utils.go

@@ -1,5 +1,5 @@
 // Copyright 2013 Beego Authors
-// Copyright 2014 Unknwon
+// Copyright 2014 The Macaron Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License"): you may
 // not use this file except in compliance with the License. You may obtain
@@ -24,6 +24,17 @@ import (
 	"github.com/Unknwon/com"
 )
 
+func init() {
+	gob.Register([]interface{}{})
+	gob.Register(map[int]interface{}{})
+	gob.Register(map[string]interface{}{})
+	gob.Register(map[interface{}]interface{}{})
+	gob.Register(map[string]string{})
+	gob.Register(map[int]string{})
+	gob.Register(map[int]int{})
+	gob.Register(map[int]int64{})
+}
+
 func EncodeGob(obj map[interface{}]interface{}) ([]byte, error) {
 	for _, v := range obj {
 		gob.Register(v)

+ 27 - 0
Godeps/_workspace/src/github.com/klauspost/compress/LICENSE

@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 32 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/copy.go

@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
// forwardCopy copies n bytes within mem from src to dst, always walking
// forward from the start even when the two ranges overlap. It is
// equivalent to:
//   for i := 0; i < n; i++ {
//     mem[dst+i] = mem[src+i]
//   }
func forwardCopy(mem []byte, dst, src, n int) {
	// dst at or before src: the built-in copy already behaves like a
	// forward copy for this direction.
	if dst <= src {
		copy(mem[dst:dst+n], mem[src:src+n])
		return
	}
	// Forward overlap: the destination is filled with a repeated pattern
	// of the k = dst-src bytes at mem[src:src+k]. Replicate that pattern,
	// with k doubling on every pass, until the remaining span no longer
	// overlaps the source.
	for dst < src+n {
		k := dst - src
		copy(mem[dst:dst+k], mem[src:src+k])
		n -= k
		dst += k
	}
	copy(mem[dst:dst+n], mem[src:src+n])
}

+ 39 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.go

@@ -0,0 +1,39 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package flate
+
+import (
+	"github.com/klauspost/cpuid"
+)
+
+// crc32sse returns a hash for the first 4 bytes of the slice
+// len(a) must be >= 4.
+//go:noescape
+func crc32sse(a []byte) hash
+
+// crc32sseAll calculates hashes for each 4-byte set in a.
+// dst must be at least len(a) - 4 in size.
+// The size is not checked by the assembly.
+//go:noescape
+func crc32sseAll(a []byte, dst []hash)
+
+// matchLenSSE4 returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+// It uses the PCMPESTRI SSE 4.2 instruction.
+//go:noescape
+func matchLenSSE4(a, b []byte, max int) int
+
+// histogram accumulates a histogram of b in h.
+// h must be at least 256 entries in length,
+// and must be cleared before calling this function.
+//go:noescape
+func histogram(b []byte, h []int32)
+
+// Detect SSE 4.2 feature.
+func init() {
+	useSSE42 = cpuid.CPU.SSE42()
+}

+ 212 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_amd64.s

@@ -0,0 +1,212 @@
+//+build !noasm !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+// func crc32sse(a []byte) hash
+TEXT ·crc32sse(SB), 7, $0
+	MOVQ a+0(FP), R10
+	XORQ BX, BX
+
+	// CRC32   dword (R10), EBX
+	BYTE $0xF2; BYTE $0x41; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0x1a
+
+	MOVL BX, ret+24(FP)
+	RET
+
+// func crc32sseAll(a []byte, dst []hash)
+TEXT ·crc32sseAll(SB), 7, $0
+	MOVQ  a+0(FP), R8      // R8: src
+	MOVQ  a_len+8(FP), R10 // input length
+	MOVQ  dst+24(FP), R9   // R9: dst
+	SUBQ  $4, R10
+	JS    end
+	JZ    one_crc
+	MOVQ  R10, R13
+	SHRQ  $2, R10          // len/4
+	ANDQ  $3, R13          // len&3
+	XORQ  BX, BX
+	ADDQ  $1, R13
+	TESTQ R10, R10
+	JZ    rem_loop
+
+crc_loop:
+	MOVQ (R8), R11
+	XORQ BX, BX
+	XORQ DX, DX
+	XORQ DI, DI
+	MOVQ R11, R12
+	SHRQ $8, R11
+	MOVQ R12, AX
+	MOVQ R11, CX
+	SHRQ $16, R12
+	SHRQ $16, R11
+	MOVQ R12, SI
+
+	// CRC32   EAX, EBX
+	BYTE $0xF2; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+
+	// CRC32   ECX, EDX
+	BYTE $0xF2; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0xd1
+
+	// CRC32   ESI, EDI
+	BYTE $0xF2; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0xfe
+	MOVL BX, (R9)
+	MOVL DX, 4(R9)
+	MOVL DI, 8(R9)
+
+	XORQ BX, BX
+	MOVL R11, AX
+
+	// CRC32   EAX, EBX
+	BYTE $0xF2; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+	MOVL BX, 12(R9)
+
+	ADDQ $16, R9
+	ADDQ $4, R8
+	XORQ BX, BX
+	SUBQ $1, R10
+	JNZ  crc_loop
+
+rem_loop:
+	MOVL (R8), AX
+
+	// CRC32   EAX, EBX
+	BYTE $0xF2; BYTE $0x0f
+	BYTE $0x38; BYTE $0xf1; BYTE $0xd8
+
+	MOVL BX, (R9)
+	ADDQ $4, R9
+	ADDQ $1, R8
+	XORQ BX, BX
+	SUBQ $1, R13
+	JNZ  rem_loop
+
+end:
+	RET
+
+one_crc:
+	MOVQ $1, R13
+	XORQ BX, BX
+	JMP  rem_loop
+
+// func matchLenSSE4(a, b []byte, max int) int
+TEXT ·matchLenSSE4(SB), 7, $0
+	MOVQ a+0(FP), R8     // R8: &a
+	MOVQ b+24(FP), R9    // R9: &b
+	MOVQ max+48(FP), R10 // R10: max
+	XORQ R11, R11        // match length
+
+	MOVQ R10, R12
+	SHRQ $4, R10            // max/16
+	ANDQ $15, R12           // max & 15
+	CMPQ R10, $0
+	JEQ  matchlen_verysmall
+
+loopback_matchlen:
+	MOVOU (R8), X0 // a[x]
+	MOVOU (R9), X1 // b[x]
+
+	// PCMPESTRI $0x18, X1, X0
+	BYTE $0x66; BYTE $0x0f; BYTE $0x3a
+	BYTE $0x61; BYTE $0xc1; BYTE $0x18
+
+	JC match_ended
+
+	ADDQ $16, R8
+	ADDQ $16, R9
+	ADDQ $16, R11
+
+	SUBQ $1, R10
+	JNZ  loopback_matchlen
+
+matchlen_verysmall:
+	CMPQ R12, $0
+	JEQ  done_matchlen
+
+loopback_matchlen_single:
+	// Naive byte-at-a-time compare, used only for the short tail (max & 15 bytes)
+	MOVB (R8), R13
+	MOVB (R9), R14
+	CMPB R13, R14
+	JNE  done_matchlen
+	ADDQ $1, R8
+	ADDQ $1, R9
+	ADDQ $1, R11
+	SUBQ $1, R12
+	JNZ  loopback_matchlen_single
+	MOVQ R11, ret+56(FP)
+	RET
+
+match_ended:
+	ADDQ CX, R11
+
+done_matchlen:
+	MOVQ R11, ret+56(FP)
+	RET
+
+// func histogram(b []byte, h []int32)
+TEXT ·histogram(SB), 7, $0
+	MOVQ b+0(FP), SI     // SI: &b
+	MOVQ b_len+8(FP), R9 // R9: len(b)
+	MOVQ h+24(FP), DI    // DI: Histogram
+	MOVQ R9, R8
+	SHRQ $3, R8
+	JZ   hist1
+	XORQ R11, R11
+
+loop_hist8:
+	MOVQ (SI), R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	MOVB R10, R11
+	INCL (DI)(R11*4)
+	SHRQ $8, R10
+
+	INCL (DI)(R10*4)
+
+	ADDQ $8, SI
+	DECQ R8
+	JNZ  loop_hist8
+
+hist1:
+	ANDQ $7, R9
+	JZ   end_hist
+	XORQ R10, R10
+
+loop_hist1:
+	MOVB (SI), R10
+	INCL (DI)(R10*4)
+	INCQ SI
+	DECQ R9
+	JNZ  loop_hist1
+
+end_hist:
+	RET

+ 34 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/crc32_noasm.go

@@ -0,0 +1,34 @@
+//+build !amd64 noasm appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+// Fallback definitions used when the amd64 SSE 4.2 assembler routines are
+// excluded from the build (non-amd64 targets, or the noasm/appengine tags).
+package flate
+
+// Record that the SSE 4.2 fast paths are unavailable so the compressor
+// selects the portable implementations instead.
+func init() {
+	useSSE42 = false
+}
+
+// crc32sse should never be called: it is only reachable from the SSE code
+// paths, which are selected only when useSSE42 is true.
+func crc32sse(a []byte) hash {
+	panic("no assembler")
+}
+
+// crc32sseAll should never be called; see crc32sse.
+func crc32sseAll(a []byte, dst []hash) {
+	panic("no assembler")
+}
+
// matchLenSSE4 should never be called on builds without the assembler
// implementation; it exists only to satisfy references from the SSE code
// paths, which are disabled when useSSE42 is false.
func matchLenSSE4(a, b []byte, max int) int {
	// panic terminates the function, so no return statement is needed;
	// the unreachable "return 0" that followed it has been removed
	// (flagged by staticcheck as dead code).
	panic("no assembler")
}
+
// histogram accumulates a histogram of the byte values of b into h.
// h must be at least 256 entries in length, and must be cleared
// before calling this function.
func histogram(b []byte, h []int32) {
	for i := 0; i < len(b); i++ {
		h[b[i]]++
	}
}

+ 1293 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/deflate.go

@@ -0,0 +1,1293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	NoCompression       = 0
+	BestSpeed           = 1
+	fastCompression     = 3
+	BestCompression     = 9
+	DefaultCompression  = -1
+	ConstantCompression = -2 // Does only Huffman encoding
+	logWindowSize       = 15
+	windowSize          = 1 << logWindowSize
+	windowMask          = windowSize - 1
+	logMaxOffsetSize    = 15  // Standard DEFLATE
+	minMatchLength      = 4   // The smallest match that the compressor looks for
+	maxMatchLength      = 258 // The longest match for the compressor
+	minOffsetSize       = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we put into a single flate block, just to
+	// stop things from getting too large.
+	maxFlateBlockTokens = 1 << 14
+	maxStoreBlockSize   = 65535
+	hashBits            = 17 // After 17 performance degrades
+	hashSize            = 1 << hashBits
+	hashMask            = (1 << hashBits) - 1
+	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
+	maxHashOffset       = 1 << 24
+
+	skipNever = math.MaxInt32
+)
+
+var useSSE42 bool
+
+type compressionLevel struct {
+	good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+var levels = []compressionLevel{
+	{}, // 0
+	// For levels 1-3 we don't bother trying with lazy matches
+	{4, 0, 8, 4, 4, 1},
+	{4, 0, 16, 8, 5, 2},
+	{4, 0, 32, 32, 6, 3},
+	// Levels 4-9 use increasingly more lazy matching
+	// and increasingly stringent conditions for "good enough".
+	{4, 4, 16, 16, skipNever, 4},
+	{8, 16, 32, 32, skipNever, 5},
+	{8, 16, 128, 128, skipNever, 6},
+	{8, 32, 128, 256, skipNever, 7},
+	{32, 128, 258, 1024, skipNever, 8},
+	{32, 258, 258, 4096, skipNever, 9},
+}
+
+type hashid uint32
+
+type compressor struct {
+	compressionLevel
+
+	w          *huffmanBitWriter
+	bulkHasher func([]byte, []hash)
+
+	// compression algorithm
+	fill func(*compressor, []byte) int // copy data to window
+	step func(*compressor)             // process window
+	sync bool                          // requesting flush
+
+	// Input hash chains
+	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
+	// If hashHead[hashValue] is within the current window, then
+	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+	// with the same hash value.
+	chainHead  int
+	hashHead   []hashid
+	hashPrev   []hashid
+	hashOffset int
+
+	// input window: unprocessed data is window[index:windowEnd]
+	index         int
+	window        []byte
+	windowEnd     int
+	blockStart    int  // window index where current tokens start
+	byteAvailable bool // if true, still need to process window[index-1].
+
+	// queued output tokens
+	tokens tokens
+
+	// deflate state
+	length         int
+	offset         int
+	hash           hash
+	maxInsertIndex int
+	err            error
+	ii             uint16 // position of last match, intended to overflow to reset.
+
+	hashMatch [maxMatchLength + minMatchLength]hash
+}
+
+type hash int32
+
+func (d *compressor) fillDeflate(b []byte) int {
+	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+		// shift the window by windowSize
+		copy(d.window, d.window[windowSize:2*windowSize])
+		d.index -= windowSize
+		d.windowEnd -= windowSize
+		if d.blockStart >= windowSize {
+			d.blockStart -= windowSize
+		} else {
+			d.blockStart = math.MaxInt32
+		}
+		d.hashOffset += windowSize
+		if d.hashOffset > maxHashOffset {
+			delta := d.hashOffset - 1
+			d.hashOffset -= delta
+			d.chainHead -= delta
+			for i, v := range d.hashPrev {
+				if int(v) > delta {
+					d.hashPrev[i] = hashid(int(v) - delta)
+				} else {
+					d.hashPrev[i] = 0
+				}
+			}
+			for i, v := range d.hashHead {
+				if int(v) > delta {
+					d.hashHead[i] = hashid(int(v) - delta)
+				} else {
+					d.hashHead[i] = 0
+				}
+			}
+		}
+	}
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		var window []byte
+		if d.blockStart <= index {
+			window = d.window[d.blockStart:index]
+		}
+		d.blockStart = index
+		d.w.writeBlock(tok, eof, window)
+		return d.w.err
+	}
+	return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only mode,
+	// use constant or Snappy compression.
+	if d.compressionLevel.level == 0 {
+		return
+	}
+	// If we are given too much, cut it.
+	if len(b) > windowSize {
+		b = b[len(b)-windowSize:]
+	}
+	// Add all to window.
+	n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at the time (more L1 cache hits)
+	loops := (n + 256 - minMatchLength) / 256
+	for j := 0; j < loops; j++ {
+		startindex := j * 256
+		end := startindex + 256 + minMatchLength - 1
+		if end > n {
+			end = n
+		}
+		tocheck := d.window[startindex:end]
+		dstSize := len(tocheck) - minMatchLength + 1
+
+		if dstSize <= 0 {
+			continue
+		}
+
+		dst := d.hashMatch[:dstSize]
+		d.bulkHasher(tocheck, dst)
+		var newH hash
+		for i, val := range dst {
+			di := i + startindex
+			newH = val & hashMask
+			// Get previous value with the same hash.
+			// Our chain should point to the previous value.
+			d.hashPrev[di&windowMask] = d.hashHead[newH]
+			// Set the head of the hash chain to us.
+			d.hashHead[newH] = hashid(di + d.hashOffset)
+		}
+		d.hash = newH
+	}
+	// Update window information.
+	d.windowEnd += n
+	d.index = n
+}
+
+// Try to find a match starting at index whose length is greater than prevSize.
+// We only look at chainCount possibilities before giving up.
+// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// If we've got a match that's good enough, only look in 1/4 the chain.
+	tries := d.chain
+	length = prevLength
+	if length >= d.good {
+		tries >>= 2
+	}
+
+	wEnd := win[pos+length]
+	wPos := win[pos:]
+	minIndex := pos - windowSize
+
+	for i := prevHead; tries > 0; tries-- {
+		if wEnd == win[i+length] {
+			n := matchLen(win[i:], wPos, minMatchLook)
+
+			if n > length && (n > minMatchLength || pos-i <= 4096) {
+				length = n
+				offset = pos - i
+				ok = true
+				if n >= nice {
+					// The match is good enough that we don't try to find a better one.
+					break
+				}
+				wEnd = win[pos+n]
+			}
+		}
+		if i == minIndex {
+			// hashPrev[i & windowMask] has already been overwritten, so stop now.
+			break
+		}
+		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+		if i < minIndex || i < 0 {
+			break
+		}
+	}
+	return
+}
+
+// Try to find a match starting at index whose length is greater than prevSize.
+// We only look at chainCount possibilities before giving up.
+// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// If we've got a match that's good enough, only look in 1/4 the chain.
+	tries := d.chain
+	length = prevLength
+	if length >= d.good {
+		tries >>= 2
+	}
+
+	wEnd := win[pos+length]
+	wPos := win[pos:]
+	minIndex := pos - windowSize
+
+	for i := prevHead; tries > 0; tries-- {
+		if wEnd == win[i+length] {
+			n := matchLenSSE4(win[i:], wPos, minMatchLook)
+
+			if n > length && (n > minMatchLength || pos-i <= 4096) {
+				length = n
+				offset = pos - i
+				ok = true
+				if n >= nice {
+					// The match is good enough that we don't try to find a better one.
+					break
+				}
+				wEnd = win[pos+n]
+			}
+		}
+		if i == minIndex {
+			// hashPrev[i & windowMask] has already been overwritten, so stop now.
+			break
+		}
+		i = int(d.hashPrev[i&windowMask]) - d.hashOffset
+		if i < minIndex || i < 0 {
+			break
+		}
+	}
+	return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.writeBytes(buf)
+	return d.w.err
+}
+
+// oldHash is the hash function used when no native crc32 calculation
+// or similar is present. It folds the first minMatchLength (4) bytes of b
+// into a single hash value; b must therefore hold at least 4 bytes.
+func oldHash(b []byte) hash {
+	return hash(b[0])<<(hashShift*3) + hash(b[1])<<(hashShift*2) + hash(b[2])<<hashShift + hash(b[3])
+}
+
+// oldBulkHash will compute hashes using the same
+// algorithm as oldHash, writing one hash per 4-byte window of b into dst.
+// dst must have room for len(b)-minMatchLength+1 entries; inputs shorter
+// than minMatchLength produce no output.
+func oldBulkHash(b []byte, dst []hash) {
+	if len(b) < minMatchLength {
+		return
+	}
+	// Seed with the hash of the first window, then roll forward one byte
+	// at a time: shifting and adding the next byte matches oldHash for
+	// each position in the low hashBits once the caller masks with hashMask.
+	h := oldHash(b)
+	dst[0] = h
+	i := 1
+	end := len(b) - minMatchLength + 1
+	for ; i < end; i++ {
+		h = (h << hashShift) + hash(b[i+3])
+		dst[i] = h
+	}
+}
+
// matchLen reports how many leading bytes of a and b are equal,
// comparing at most 'max' bytes. Both slices must be at least
// 'max' bytes in size.
func matchLen(a, b []byte, max int) int {
	a = a[:max]
	i := 0
	for ; i < len(a); i++ {
		if a[i] != b[i] {
			break
		}
	}
	return i
}
+
+func (d *compressor) initDeflate() {
+	d.hashHead = make([]hashid, hashSize)
+	d.hashPrev = make([]hashid, windowSize)
+	d.window = make([]byte, 2*windowSize)
+	d.hashOffset = 1
+	d.tokens.tokens = make([]token, maxFlateBlockTokens+1)
+	d.length = minMatchLength - 1
+	d.offset = 0
+	d.byteAvailable = false
+	d.index = 0
+	d.hash = 0
+	d.chainHead = -1
+	d.bulkHasher = oldBulkHash
+	if useSSE42 {
+		d.bulkHasher = crc32sseAll
+	}
+}
+
+// Assumes that d.fastSkipHashing != skipNever,
+// otherwise use deflateNoSkip
+func (d *compressor) deflate() {
+
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+
+	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+	if d.index < d.maxInsertIndex {
+		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+	}
+
+	for {
+		if sanity && d.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - d.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && d.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				return
+			}
+		}
+		if d.index < d.maxInsertIndex {
+			// Update the hash
+			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+			ch := d.hashHead[d.hash]
+			d.chainHead = int(ch)
+			d.hashPrev[d.index&windowMask] = ch
+			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
+		}
+		d.length = minMatchLength - 1
+		d.offset = 0
+		minIndex := d.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+				d.length = newLength
+				d.offset = newOffset
+			}
+		}
+		if d.length >= minMatchLength {
+			d.ii = 0
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
+			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+			d.tokens.n++
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			if d.length <= d.fastSkipHashing {
+				var newIndex int
+				newIndex = d.index + d.length
+				// Calculate missing hashes
+				end := newIndex
+				if end > d.maxInsertIndex {
+					end = d.maxInsertIndex
+				}
+				end += minMatchLength - 1
+				startindex := d.index + 1
+				if startindex > d.maxInsertIndex {
+					startindex = d.maxInsertIndex
+				}
+				tocheck := d.window[startindex:end]
+				dstSize := len(tocheck) - minMatchLength + 1
+				if dstSize > 0 {
+					dst := d.hashMatch[:dstSize]
+					oldBulkHash(tocheck, dst)
+					var newH hash
+					for i, val := range dst {
+						di := i + startindex
+						newH = val & hashMask
+						// Get previous value with the same hash.
+						// Our chain should point to the previous value.
+						d.hashPrev[di&windowMask] = d.hashHead[newH]
+						// Set the head of the hash chain to us.
+						d.hashHead[newH] = hashid(di + d.hashOffset)
+					}
+					d.hash = newH
+				}
+				d.index = newIndex
+			} else {
+				// For matches this long, we don't bother inserting each individual
+				// item into the table.
+				d.index += d.length
+				if d.index < d.maxInsertIndex {
+					d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+				}
+			}
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			d.ii++
+			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
+			if end > d.windowEnd {
+				end = d.windowEnd
+			}
+			for i := d.index; i < end; i++ {
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+			}
+			d.index = end
+		}
+	}
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+
+	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+	if d.index < d.maxInsertIndex {
+		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+	}
+
+	for {
+		if sanity && d.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - d.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && d.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				// Flush current output block if any.
+				if d.byteAvailable {
+					// There is still one pending token that needs to be flushed
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+				}
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				return
+			}
+		}
+		if d.index < d.maxInsertIndex {
+			// Update the hash
+			d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+			ch := d.hashHead[d.hash]
+			d.chainHead = int(ch)
+			d.hashPrev[d.index&windowMask] = ch
+			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
+		}
+		prevLength := d.length
+		prevOffset := d.offset
+		d.length = minMatchLength - 1
+		d.offset = 0
+		minIndex := d.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+			if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+				d.length = newLength
+				d.offset = newOffset
+			}
+		}
+		if prevLength >= minMatchLength && d.length <= prevLength {
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+			d.tokens.n++
+
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			var newIndex int
+			newIndex = d.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > d.maxInsertIndex {
+				end = d.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := d.index + 1
+			if startindex > d.maxInsertIndex {
+				startindex = d.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := d.hashMatch[:dstSize]
+				oldBulkHash(tocheck, dst)
+				var newH hash
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					d.hashPrev[di&windowMask] = d.hashHead[newH]
+					// Set the head of the hash chain to us.
+					d.hashHead[newH] = hashid(di + d.hashOffset)
+				}
+				d.hash = newH
+			}
+
+			d.index = newIndex
+			d.byteAvailable = false
+			d.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			// Reset, if we got a match this run.
+			if d.length >= minMatchLength {
+				d.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				d.ii++
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				d.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when d.ii overflows after 64KB.
+				if d.ii > 31 {
+					n := int(d.ii >> 6)
+					for j := 0; j < n; j++ {
+						if d.index >= d.windowEnd-1 {
+							break
+						}
+
+						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+						d.tokens.n++
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+								return
+							}
+							d.tokens.n = 0
+						}
+						d.index++
+					}
+					// Flush last byte
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+							return
+						}
+						d.tokens.n = 0
+					}
+				}
+			} else {
+				d.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+// Assumes that d.fastSkipHashing != skipNever,
+// otherwise use deflateNoSkip
+func (d *compressor) deflateSSE() {
+
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+
+	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+	if d.index < d.maxInsertIndex {
+		d.hash = oldHash(d.window[d.index:d.index+minMatchLength]) & hashMask
+	}
+
+	for {
+		if sanity && d.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - d.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && d.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				return
+			}
+		}
+		if d.index < d.maxInsertIndex {
+			// Update the hash
+			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+			ch := d.hashHead[d.hash]
+			d.chainHead = int(ch)
+			d.hashPrev[d.index&windowMask] = ch
+			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
+		}
+		d.length = minMatchLength - 1
+		d.offset = 0
+		minIndex := d.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 {
+			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+				d.length = newLength
+				d.offset = newOffset
+			}
+		}
+		if d.length >= minMatchLength {
+			d.ii = 0
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			// "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3
+			d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize))
+			d.tokens.n++
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			if d.length <= d.fastSkipHashing {
+				var newIndex int
+				newIndex = d.index + d.length
+				// Calculate missing hashes
+				end := newIndex
+				if end > d.maxInsertIndex {
+					end = d.maxInsertIndex
+				}
+				end += minMatchLength - 1
+				startindex := d.index + 1
+				if startindex > d.maxInsertIndex {
+					startindex = d.maxInsertIndex
+				}
+				tocheck := d.window[startindex:end]
+				dstSize := len(tocheck) - minMatchLength + 1
+				if dstSize > 0 {
+					dst := d.hashMatch[:dstSize]
+
+					crc32sseAll(tocheck, dst)
+					var newH hash
+					for i, val := range dst {
+						di := i + startindex
+						newH = val & hashMask
+						// Get previous value with the same hash.
+						// Our chain should point to the previous value.
+						d.hashPrev[di&windowMask] = d.hashHead[newH]
+						// Set the head of the hash chain to us.
+						d.hashHead[newH] = hashid(di + d.hashOffset)
+					}
+					d.hash = newH
+				}
+				d.index = newIndex
+			} else {
+				// For matches this long, we don't bother inserting each individual
+				// item into the table.
+				d.index += d.length
+				if d.index < d.maxInsertIndex {
+					d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+				}
+			}
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			d.ii++
+			end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1
+			if end > d.windowEnd {
+				end = d.windowEnd
+			}
+			for i := d.index; i < end; i++ {
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, i+1, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+			}
+			d.index = end
+		}
+	}
+}
+
+// deflateLazySSE is the SSE4.2 variant of deflateLazy: it behaves like
+// deflate with d.fastSkipHashing == skipNever, meaning it always has lazy
+// matching on, but computes hashes with the CRC32 instruction (crc32sse).
+func (d *compressor) deflateLazySSE() {
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	// Too little buffered input to be worth compressing, unless the caller
+	// requested a flush/close (d.sync).
+	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+
+	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+	if d.index < d.maxInsertIndex {
+		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+	}
+
+	for {
+		if sanity && d.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - d.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && d.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				// Flush current output block if any.
+				if d.byteAvailable {
+					// There is still one pending token that needs to be flushed
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+				}
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				return
+			}
+		}
+		if d.index < d.maxInsertIndex {
+			// Update the hash and insert the current position into the chain.
+			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+			ch := d.hashHead[d.hash]
+			d.chainHead = int(ch)
+			d.hashPrev[d.index&windowMask] = ch
+			d.hashHead[d.hash] = hashid(d.index + d.hashOffset)
+		}
+		// Lazy matching: remember the match (if any) found at the previous
+		// position before searching from the current one.
+		prevLength := d.length
+		prevOffset := d.offset
+		d.length = minMatchLength - 1
+		d.offset = 0
+		minIndex := d.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		// Search for a match at the current position; it is only used if it
+		// beats the previous position's match.
+		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+				d.length = newLength
+				d.offset = newOffset
+			}
+		}
+		if prevLength >= minMatchLength && d.length <= prevLength {
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+			d.tokens.n++
+
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			var newIndex int
+			newIndex = d.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > d.maxInsertIndex {
+				end = d.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := d.index + 1
+			if startindex > d.maxInsertIndex {
+				startindex = d.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				// Hash all skipped positions in a single SSE4.2 batch.
+				dst := d.hashMatch[:dstSize]
+				crc32sseAll(tocheck, dst)
+				var newH hash
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					d.hashPrev[di&windowMask] = d.hashHead[newH]
+					// Set the head of the hash chain to us.
+					d.hashHead[newH] = hashid(di + d.hashOffset)
+				}
+				d.hash = newH
+			}
+
+			d.index = newIndex
+			d.byteAvailable = false
+			d.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			// Reset, if we got a match this run.
+			if d.length >= minMatchLength {
+				d.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				d.ii++
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				d.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when d.ii overflows after 64KB.
+				if d.ii > 31 {
+					n := int(d.ii >> 6)
+					for j := 0; j < n; j++ {
+						if d.index >= d.windowEnd-1 {
+							break
+						}
+
+						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+						d.tokens.n++
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+								return
+							}
+							d.tokens.n = 0
+						}
+						d.index++
+					}
+					// Flush last byte
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+							return
+						}
+						d.tokens.n = 0
+					}
+				}
+			} else {
+				// No byte pending yet: defer the emit decision to the next
+				// iteration. This deferral is what makes the matching lazy.
+				d.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+// fillStore copies as much of b as fits into the window buffer and
+// returns the number of bytes consumed.
+func (d *compressor) fillStore(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// store writes the buffered window as an uncompressed (stored) block
+// and empties the buffer. Any error is left in d.err.
+func (d *compressor) store() {
+	if d.windowEnd > 0 {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+	}
+	d.windowEnd = 0
+}
+
+// fillHuff will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+// (Identical to fillStore; kept separate so each strategy pairs its own
+// fill/step functions in init.)
+func (d *compressor) fillHuff(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	// We only compress if we have maxStoreBlockSize or we are at end-of-stream
+	if d.windowEnd < maxStoreBlockSize && !d.sync {
+		return
+	}
+	if d.windowEnd == 0 {
+		return
+	}
+	// Huffman-only block: no match search is performed.
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeSnappy will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// It uses a snappy-style match search (snappyEncode) to build the tokens.
+// Any error that occurred will be in d.err
+func (d *compressor) storeSnappy() {
+	// We only compress if we have maxStoreBlockSize.
+	if d.windowEnd < maxStoreBlockSize && !d.sync {
+		return
+	}
+	if d.windowEnd == 0 {
+		return
+	}
+	snappyEncode(&d.tokens, d.window[:d.windowEnd])
+	d.w.writeBlock(d.tokens, false, d.window[:d.windowEnd])
+	d.err = d.w.err
+	d.tokens.n = 0
+	d.windowEnd = 0
+}
+
+// write adds input bytes to the stream. Unless an error occurs, all
+// bytes will be consumed: it alternates running the compression step
+// (d.step) with refilling the window (d.fill) until b is drained.
+func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	n = len(b)
+	for len(b) > 0 {
+		d.step(d)
+		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
+	}
+	return n, d.err
+}
+
+// syncFlush compresses all pending input, then emits an empty stored
+// block and flushes the bit writer so a decompressor can reconstruct
+// everything written so far (zlib Z_SYNC_FLUSH semantics).
+func (d *compressor) syncFlush() error {
+	// d.sync makes the step function process even a partially filled window.
+	d.sync = true
+	if d.err != nil {
+		return d.err
+	}
+	d.step(d)
+	if d.err == nil {
+		// Empty stored block acts as the byte-aligned sync marker.
+		d.w.writeStoredHeader(0, false)
+		d.w.flush()
+		d.err = d.w.err
+	}
+	d.sync = false
+	return d.err
+}
+
+// init configures the compressor to write to w at the given level,
+// selecting the fill/step strategy pair:
+//   NoCompression       -> stored blocks only
+//   ConstantCompression -> Huffman-only encoding
+//   1 (BestSpeed)       -> snappy-style matching
+//   2-9                 -> full deflate (SSE4.2 variants when available)
+// Levels outside [-2, 9] yield an error.
+func (d *compressor) init(w io.Writer, level int) (err error) {
+	d.w = newHuffmanBitWriter(w)
+
+	switch {
+	case level == NoCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillStore
+		d.step = (*compressor).store
+	case level == ConstantCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillHuff
+		d.step = (*compressor).storeHuff
+	case level == 1:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillHuff
+		d.step = (*compressor).storeSnappy
+		// +1: snappy token buffer needs room for a trailing token.
+		d.tokens.tokens = make([]token, maxStoreBlockSize+1)
+	case level == DefaultCompression:
+		level = 6
+		fallthrough
+	case 2 <= level && level <= 9:
+		d.compressionLevel = levels[level]
+		d.initDeflate()
+		d.fill = (*compressor).fillDeflate
+		// Pick the lazy/greedy variant, with SSE4.2 hashing when supported.
+		if d.fastSkipHashing == skipNever {
+			if useSSE42 {
+				d.step = (*compressor).deflateLazySSE
+			} else {
+				d.step = (*compressor).deflateLazy
+			}
+		} else {
+			if useSSE42 {
+				d.step = (*compressor).deflateSSE
+			} else {
+				d.step = (*compressor).deflate
+
+			}
+		}
+	default:
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+	}
+	return nil
+}
+
+// Used for zeroing the hash slice in chunks of len(hzeroes).
+var hzeroes [256]hashid
+
+// reset discards the compressor state so it can start a fresh stream
+// to w, keeping the allocated window and hash tables.
+func (d *compressor) reset(w io.Writer) {
+	d.w.reset(w)
+	d.sync = false
+	d.err = nil
+	switch d.compressionLevel.chain {
+	case 0:
+		// level was NoCompression or ConstantCompression.
+		d.windowEnd = 0
+	default:
+		d.chainHead = -1
+		// Zero the hash tables chunk-by-chunk via copy from hzeroes.
+		for s := d.hashHead; len(s) > 0; {
+			n := copy(s, hzeroes[:])
+			s = s[n:]
+		}
+		for s := d.hashPrev; len(s) > 0; s = s[len(hzeroes):] {
+			copy(s, hzeroes[:])
+		}
+		d.hashOffset = 1
+
+		d.index, d.windowEnd = 0, 0
+		d.blockStart, d.byteAvailable = 0, false
+
+		d.tokens.n = 0
+		d.length = minMatchLength - 1
+		d.offset = 0
+		d.hash = 0
+		d.ii = 0
+		d.maxInsertIndex = 0
+	}
+}
+
+// close compresses any remaining buffered input and writes the final
+// end-of-stream marker (an empty stored block flagged as last).
+func (d *compressor) close() error {
+	if d.err != nil {
+		return d.err
+	}
+	d.sync = true
+	d.step(d)
+	if d.err != nil {
+		return d.err
+	}
+	// last=true marks this header as the final block of the stream.
+	if d.w.writeStoredHeader(0, true); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.flush()
+	return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more. Level 0
+// (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing. Level -1 (DefaultCompression) uses the default
+// compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+	// The compressor is embedded by value; init wires it to w.
+	var dw Writer
+	if err := dw.d.init(w, level); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary.  The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output.  The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+	// Wrap w so Reset can later detect the dictionary-backed setup.
+	dw := &dictWriter{w}
+	zw, err := NewWriter(dw, level)
+	if err != nil {
+		return nil, err
+	}
+	zw.d.fillWindow(dict)
+	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+	return zw, err
+}
+
+// dictWriter wraps the user-supplied writer so that Writer.Reset can
+// detect a Writer created by NewWriterDict and retarget its destination.
+type dictWriter struct {
+	w io.Writer
+}
+
+// Write forwards to the wrapped writer unchanged.
+func (w *dictWriter) Write(b []byte) (n int, err error) {
+	return w.w.Write(b)
+}
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+	d    compressor
+	dict []byte // preset dictionary saved for Reset (set by NewWriterDict/ResetDict)
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	return w.d.write(data)
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+	// For more about flushing:
+	// http://www.bolet.org/~pornin/deflate-flush.html
+	// syncFlush emits the empty stored block that acts as the sync marker.
+	return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer, emitting the final
+// end-of-stream block.
+func (w *Writer) Close() error {
+	return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+	if dw, ok := w.d.w.w.(*dictWriter); ok {
+		// w was created with NewWriterDict: retarget the wrapper and
+		// replay the saved dictionary into the fresh window.
+		dw.w = dst
+		w.d.reset(dw)
+		w.d.fillWindow(w.dict)
+	} else {
+		// w was created with NewWriter
+		w.d.reset(dst)
+	}
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+	// Replace the stored dictionary and replay it into the fresh window.
+	w.dict = dict
+	w.d.reset(dst)
+	w.d.fillWindow(w.dict)
+}

+ 78 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/fixedhuff.go

@@ -0,0 +1,78 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT
+
+var fixedHuffmanDecoder = huffmanDecoder{
+	7,
+	[huffmanNumChunks]uint32{
+		0x1007, 0x0508, 0x0108, 0x1188, 0x1107, 0x0708, 0x0308, 0x0c09,
+		0x1087, 0x0608, 0x0208, 0x0a09, 0x0008, 0x0808, 0x0408, 0x0e09,
+		0x1047, 0x0588, 0x0188, 0x0909, 0x1147, 0x0788, 0x0388, 0x0d09,
+		0x10c7, 0x0688, 0x0288, 0x0b09, 0x0088, 0x0888, 0x0488, 0x0f09,
+		0x1027, 0x0548, 0x0148, 0x11c8, 0x1127, 0x0748, 0x0348, 0x0c89,
+		0x10a7, 0x0648, 0x0248, 0x0a89, 0x0048, 0x0848, 0x0448, 0x0e89,
+		0x1067, 0x05c8, 0x01c8, 0x0989, 0x1167, 0x07c8, 0x03c8, 0x0d89,
+		0x10e7, 0x06c8, 0x02c8, 0x0b89, 0x00c8, 0x08c8, 0x04c8, 0x0f89,
+		0x1017, 0x0528, 0x0128, 0x11a8, 0x1117, 0x0728, 0x0328, 0x0c49,
+		0x1097, 0x0628, 0x0228, 0x0a49, 0x0028, 0x0828, 0x0428, 0x0e49,
+		0x1057, 0x05a8, 0x01a8, 0x0949, 0x1157, 0x07a8, 0x03a8, 0x0d49,
+		0x10d7, 0x06a8, 0x02a8, 0x0b49, 0x00a8, 0x08a8, 0x04a8, 0x0f49,
+		0x1037, 0x0568, 0x0168, 0x11e8, 0x1137, 0x0768, 0x0368, 0x0cc9,
+		0x10b7, 0x0668, 0x0268, 0x0ac9, 0x0068, 0x0868, 0x0468, 0x0ec9,
+		0x1077, 0x05e8, 0x01e8, 0x09c9, 0x1177, 0x07e8, 0x03e8, 0x0dc9,
+		0x10f7, 0x06e8, 0x02e8, 0x0bc9, 0x00e8, 0x08e8, 0x04e8, 0x0fc9,
+		0x1007, 0x0518, 0x0118, 0x1198, 0x1107, 0x0718, 0x0318, 0x0c29,
+		0x1087, 0x0618, 0x0218, 0x0a29, 0x0018, 0x0818, 0x0418, 0x0e29,
+		0x1047, 0x0598, 0x0198, 0x0929, 0x1147, 0x0798, 0x0398, 0x0d29,
+		0x10c7, 0x0698, 0x0298, 0x0b29, 0x0098, 0x0898, 0x0498, 0x0f29,
+		0x1027, 0x0558, 0x0158, 0x11d8, 0x1127, 0x0758, 0x0358, 0x0ca9,
+		0x10a7, 0x0658, 0x0258, 0x0aa9, 0x0058, 0x0858, 0x0458, 0x0ea9,
+		0x1067, 0x05d8, 0x01d8, 0x09a9, 0x1167, 0x07d8, 0x03d8, 0x0da9,
+		0x10e7, 0x06d8, 0x02d8, 0x0ba9, 0x00d8, 0x08d8, 0x04d8, 0x0fa9,
+		0x1017, 0x0538, 0x0138, 0x11b8, 0x1117, 0x0738, 0x0338, 0x0c69,
+		0x1097, 0x0638, 0x0238, 0x0a69, 0x0038, 0x0838, 0x0438, 0x0e69,
+		0x1057, 0x05b8, 0x01b8, 0x0969, 0x1157, 0x07b8, 0x03b8, 0x0d69,
+		0x10d7, 0x06b8, 0x02b8, 0x0b69, 0x00b8, 0x08b8, 0x04b8, 0x0f69,
+		0x1037, 0x0578, 0x0178, 0x11f8, 0x1137, 0x0778, 0x0378, 0x0ce9,
+		0x10b7, 0x0678, 0x0278, 0x0ae9, 0x0078, 0x0878, 0x0478, 0x0ee9,
+		0x1077, 0x05f8, 0x01f8, 0x09e9, 0x1177, 0x07f8, 0x03f8, 0x0de9,
+		0x10f7, 0x06f8, 0x02f8, 0x0be9, 0x00f8, 0x08f8, 0x04f8, 0x0fe9,
+		0x1007, 0x0508, 0x0108, 0x1188, 0x1107, 0x0708, 0x0308, 0x0c19,
+		0x1087, 0x0608, 0x0208, 0x0a19, 0x0008, 0x0808, 0x0408, 0x0e19,
+		0x1047, 0x0588, 0x0188, 0x0919, 0x1147, 0x0788, 0x0388, 0x0d19,
+		0x10c7, 0x0688, 0x0288, 0x0b19, 0x0088, 0x0888, 0x0488, 0x0f19,
+		0x1027, 0x0548, 0x0148, 0x11c8, 0x1127, 0x0748, 0x0348, 0x0c99,
+		0x10a7, 0x0648, 0x0248, 0x0a99, 0x0048, 0x0848, 0x0448, 0x0e99,
+		0x1067, 0x05c8, 0x01c8, 0x0999, 0x1167, 0x07c8, 0x03c8, 0x0d99,
+		0x10e7, 0x06c8, 0x02c8, 0x0b99, 0x00c8, 0x08c8, 0x04c8, 0x0f99,
+		0x1017, 0x0528, 0x0128, 0x11a8, 0x1117, 0x0728, 0x0328, 0x0c59,
+		0x1097, 0x0628, 0x0228, 0x0a59, 0x0028, 0x0828, 0x0428, 0x0e59,
+		0x1057, 0x05a8, 0x01a8, 0x0959, 0x1157, 0x07a8, 0x03a8, 0x0d59,
+		0x10d7, 0x06a8, 0x02a8, 0x0b59, 0x00a8, 0x08a8, 0x04a8, 0x0f59,
+		0x1037, 0x0568, 0x0168, 0x11e8, 0x1137, 0x0768, 0x0368, 0x0cd9,
+		0x10b7, 0x0668, 0x0268, 0x0ad9, 0x0068, 0x0868, 0x0468, 0x0ed9,
+		0x1077, 0x05e8, 0x01e8, 0x09d9, 0x1177, 0x07e8, 0x03e8, 0x0dd9,
+		0x10f7, 0x06e8, 0x02e8, 0x0bd9, 0x00e8, 0x08e8, 0x04e8, 0x0fd9,
+		0x1007, 0x0518, 0x0118, 0x1198, 0x1107, 0x0718, 0x0318, 0x0c39,
+		0x1087, 0x0618, 0x0218, 0x0a39, 0x0018, 0x0818, 0x0418, 0x0e39,
+		0x1047, 0x0598, 0x0198, 0x0939, 0x1147, 0x0798, 0x0398, 0x0d39,
+		0x10c7, 0x0698, 0x0298, 0x0b39, 0x0098, 0x0898, 0x0498, 0x0f39,
+		0x1027, 0x0558, 0x0158, 0x11d8, 0x1127, 0x0758, 0x0358, 0x0cb9,
+		0x10a7, 0x0658, 0x0258, 0x0ab9, 0x0058, 0x0858, 0x0458, 0x0eb9,
+		0x1067, 0x05d8, 0x01d8, 0x09b9, 0x1167, 0x07d8, 0x03d8, 0x0db9,
+		0x10e7, 0x06d8, 0x02d8, 0x0bb9, 0x00d8, 0x08d8, 0x04d8, 0x0fb9,
+		0x1017, 0x0538, 0x0138, 0x11b8, 0x1117, 0x0738, 0x0338, 0x0c79,
+		0x1097, 0x0638, 0x0238, 0x0a79, 0x0038, 0x0838, 0x0438, 0x0e79,
+		0x1057, 0x05b8, 0x01b8, 0x0979, 0x1157, 0x07b8, 0x03b8, 0x0d79,
+		0x10d7, 0x06b8, 0x02b8, 0x0b79, 0x00b8, 0x08b8, 0x04b8, 0x0f79,
+		0x1037, 0x0578, 0x0178, 0x11f8, 0x1137, 0x0778, 0x0378, 0x0cf9,
+		0x10b7, 0x0678, 0x0278, 0x0af9, 0x0078, 0x0878, 0x0478, 0x0ef9,
+		0x1077, 0x05f8, 0x01f8, 0x09f9, 0x1177, 0x07f8, 0x03f8, 0x0df9,
+		0x10f7, 0x06f8, 0x02f8, 0x0bf9, 0x00f8, 0x08f8, 0x04f8, 0x0ff9,
+	},
+	nil, 0,
+}

+ 265 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/gen.go

@@ -0,0 +1,265 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// This program generates fixedhuff.go
+// Invoke as
+//
+//	go run gen.go -output fixedhuff.go
+
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/format"
+	"io/ioutil"
+	"log"
+)
+
+var filename = flag.String("output", "fixedhuff.go", "output file name")
+
+const maxCodeLen = 16
+
+// Note: the definition of the huffmanDecoder struct is copied from
+// inflate.go, as it is private to the implementation. Keep this copy
+// in sync with inflate.go when regenerating.
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits  = 9
+	huffmanNumChunks  = 1 << huffmanChunkBits
+	huffmanCountMask  = 15
+	huffmanValueShift = 4
+)
+
+type huffmanDecoder struct {
+	min      int                      // the minimum code length
+	chunks   [huffmanNumChunks]uint32 // chunks as described above
+	links    [][]uint32               // overflow links
+	linkMask uint32                   // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(bits []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction.  It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if h.min != 0 {
+		*h = huffmanDecoder{}
+	}
+
+	// Count number of codes of each length,
+	// compute min and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range bits {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	// Compute the canonical first code of each length (RFC 1951, 3.2.2).
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i] = code
+		code += count[i]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings.  See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		// Codes longer than huffmanChunkBits spill into secondary (link)
+		// tables indexed by the remaining bits.
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		// Huffman codes are read LSB-first, so store them bit-reversed.
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk.  Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry.  Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// main builds the fixed-Huffman decoder table and writes it as
+// gofmt-formatted Go source to the -output file.
+func main() {
+	flag.Parse()
+
+	var h huffmanDecoder
+	var bits [288]int
+	initReverseByte()
+	// Fixed literal/length code lengths per RFC 1951, section 3.2.6.
+	for i := 0; i < 144; i++ {
+		bits[i] = 8
+	}
+	for i := 144; i < 256; i++ {
+		bits[i] = 9
+	}
+	for i := 256; i < 280; i++ {
+		bits[i] = 7
+	}
+	for i := 280; i < 288; i++ {
+		bits[i] = 8
+	}
+	h.init(bits[:])
+	// The fixed code has max length 9 == huffmanChunkBits, so the
+	// primary table must suffice.
+	if h.links != nil {
+		log.Fatal("Unexpected links table in fixed Huffman decoder")
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.`+"\n\n")
+
+	fmt.Fprintln(&buf, "package flate")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
+	fmt.Fprintln(&buf)
+	fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
+	fmt.Fprintf(&buf, "\t%d,\n", h.min)
+	fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
+	for i := 0; i < huffmanNumChunks; i++ {
+		if i&7 == 0 {
+			fmt.Fprintf(&buf, "\t\t")
+		} else {
+			fmt.Fprintf(&buf, " ")
+		}
+		fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
+		if i&7 == 7 {
+			fmt.Fprintln(&buf)
+		}
+	}
+	fmt.Fprintln(&buf, "\t},")
+	fmt.Fprintln(&buf, "\tnil, 0,")
+	fmt.Fprintln(&buf, "}")
+
+	// Run the output through gofmt before writing it out.
+	data, err := format.Source(buf.Bytes())
+	if err != nil {
+		log.Fatal(err)
+	}
+	err = ioutil.WriteFile(*filename, data, 0644)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+
+// reverseByte maps each byte value to its bit-reversed form.
+var reverseByte [256]byte
+
+// initReverseByte fills reverseByte; must run before huffmanDecoder.init.
+func initReverseByte() {
+	for x := 0; x < 256; x++ {
+		var result byte
+		for i := uint(0); i < 8; i++ {
+			result |= byte(((x >> i) & 1) << (7 - i))
+		}
+		reverseByte[x] = result
+	}
+}

+ 690 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_bit_writer.go

@@ -0,0 +1,690 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"io"
+	"math"
+)
+
+const (
+	// The largest offset code.
+	offsetCodeCount = 30
+
+	// The special code used to mark the end of a block.
+	endBlockMarker = 256
+
+	// The first length code.
+	lengthCodesStart = 257
+
+	// The number of codegen codes.
+	codegenCodeCount = 19
+	badCode          = 255
+
+	// Output byte buffer size
+	// Must be multiple of 6 (48 bits) + 8
+	bufferSize = 240 + 8
+)
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = []int8{
+	/* 257 */ 0, 0, 0,
+	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+	/* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = []uint32{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+	64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// offset code word extra bits.
+var offsetExtraBits = []int8{
+	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+	/* extended window */
+	14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
+}
+
+var offsetBase = []uint32{
+	/* normal deflate */
+	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+	/* extended window */
+	0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
+	0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
+	0x100000, 0x180000, 0x200000, 0x300000,
+}
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+// huffmanBitWriter accumulates bits and bytes and writes Huffman-encoded
+// DEFLATE blocks to w. The first error encountered is latched in err.
+type huffmanBitWriter struct {
+	w io.Writer
+	// Data waiting to be written is bytes[0:nbytes]
+	// and then the low nbits of bits.
+	bits            uint64
+	nbits           uint
+	bytes           [bufferSize]byte
+	nbytes          int
+	literalFreq     []int32 // symbol frequencies for the literal/length alphabet
+	offsetFreq      []int32 // symbol frequencies for the offset alphabet
+	codegen         []uint8 // run-length-encoded code lengths (RFC 1951, 3.2.7)
+	codegenFreq     []int32
+	literalEncoding *huffmanEncoder
+	offsetEncoding  *huffmanEncoder
+	codegenEncoding *huffmanEncoder
+	err             error
+}
+
+// newHuffmanBitWriter returns a writer with all frequency tables and
+// encoders pre-allocated for their respective alphabet sizes.
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+	return &huffmanBitWriter{
+		w:               w,
+		literalFreq:     make([]int32, maxNumLit),
+		offsetFreq:      make([]int32, offsetCodeCount),
+		codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
+		codegenFreq:     make([]int32, codegenCodeCount),
+		literalEncoding: newHuffmanEncoder(maxNumLit),
+		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
+		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+	}
+}
+
+// reset prepares the writer to emit a fresh stream to writer, clearing
+// buffered bits/bytes, frequency tables, and code tables in place.
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+	w.w = writer
+	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+	w.bytes = [bufferSize]byte{}
+	for i := range w.codegen {
+		w.codegen[i] = 0
+	}
+	for _, s := range [...][]int32{w.literalFreq, w.offsetFreq, w.codegenFreq} {
+		for i := range s {
+			s[i] = 0
+		}
+	}
+	encs := []*huffmanEncoder{w.literalEncoding, w.codegenEncoding}
+	// Don't reset, if we are huffman only mode: huffOffset is a shared
+	// encoder and must not be cleared here.
+	if w.offsetEncoding != huffOffset {
+		encs = append(encs, w.offsetEncoding)
+	}
+	for _, enc := range encs {
+		for i := range enc.codes {
+			enc.codes[i] = 0
+		}
+	}
+}
+
+/* Inlined in writeBits
+func (w *huffmanBitWriter) flushBits() {
+	if w.err != nil {
+		w.nbits = 0
+		return
+	}
+	bits := w.bits
+	w.bits >>= 16
+	w.nbits -= 16
+	n := w.nbytes
+	w.bytes[n] = byte(bits)
+	w.bytes[n+1] = byte(bits >> 8)
+	if n += 2; n >= len(w.bytes) {
+		_, w.err = w.w.Write(w.bytes[0:])
+		n = 0
+	}
+	w.nbytes = n
+}
+*/
+
+// flush drains all buffered bytes and any partial final byte to the
+// underlying writer. The last partial byte is zero-padded.
+func (w *huffmanBitWriter) flush() {
+	if w.err != nil {
+		w.nbits = 0
+		return
+	}
+	n := w.nbytes
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
+		n++
+	}
+	w.bits = 0
+	_, w.err = w.w.Write(w.bytes[0:n])
+	w.nbytes = 0
+}
+
+// writeBits appends the low nb bits of b to the bit buffer, spilling
+// 48 bits at a time into the byte buffer and flushing that buffer to
+// the underlying writer when it nears capacity.
+func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
+	w.bits |= uint64(b) << w.nbits
+	w.nbits += nb
+	if w.nbits >= 48 {
+		bits := w.bits
+		w.bits >>= 48
+		w.nbits -= 48
+		n := w.nbytes
+		w.bytes[n] = byte(bits)
+		w.bytes[n+1] = byte(bits >> 8)
+		w.bytes[n+2] = byte(bits >> 16)
+		w.bytes[n+3] = byte(bits >> 24)
+		w.bytes[n+4] = byte(bits >> 32)
+		w.bytes[n+5] = byte(bits >> 40)
+		n += 6
+		if n >= bufferSize-8 {
+			_, w.err = w.w.Write(w.bytes[:bufferSize-8])
+			n = 0
+		}
+		w.nbytes = n
+	}
+}
+
+// writeBytes drains the buffered bits (which must end on a byte
+// boundary), flushes the internal byte buffer, then writes bytes
+// directly to the underlying writer.
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+	if w.err != nil {
+		return
+	}
+	n := w.nbytes
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		w.nbits -= 8
+		n++
+	}
+	// NOTE(review): this check looks unreachable — the loop above only
+	// exits when w.nbits == 0 (a non-multiple-of-8 nbits would underflow
+	// the uint instead of reaching here). Kept as a defensive guard.
+	if w.nbits != 0 {
+		w.err = InternalError("writeBytes with unfinished bits")
+		return
+	}
+	if n != 0 {
+		_, w.err = w.w.Write(w.bytes[0:n])
+		if w.err != nil {
+			return
+		}
+	}
+	w.nbytes = 0
+	_, w.err = w.w.Write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array).  This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information.  Code badCode is an end marker
+//
+//  numLiterals      The number of literals in literalEncoding
+//  numOffsets       The number of offsets in offsetEncoding
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int) {
+	for i := range w.codegenFreq {
+		w.codegenFreq[i] = 0
+	}
+	// Note that we are using codegen both as a temporary variable for holding
+	// a copy of the frequencies, and as the place where we put the result.
+	// This is fine because the output is always shorter than the input used
+	// so far.
+	codegen := w.codegen // cache
+	// Copy the concatenated code sizes to codegen.  Put a marker at the end.
+	//copy(codegen[0:numLiterals], w.literalEncoding.codeBits)
+	cgnl := codegen[0:numLiterals]
+	for i := range cgnl {
+		cgnl[i] = uint8(w.literalEncoding.codes[i].bits())
+	}
+
+	//copy(codegen[numLiterals:numLiterals+numOffsets], w.offsetEncoding.codeBits)
+	cgnl = codegen[numLiterals : numLiterals+numOffsets]
+	for i := range cgnl {
+		cgnl[i] = uint8(w.offsetEncoding.codes[i].bits())
+	}
+	codegen[numLiterals+numOffsets] = badCode
+
+	size := codegen[0]
+	count := 1
+	outIndex := 0
+	for inIndex := 1; size != badCode; inIndex++ {
+		// INVARIANT: We have seen "count" copies of size that have not yet
+		// had output generated for them.
+		nextSize := codegen[inIndex]
+		if nextSize == size {
+			count++
+			continue
+		}
+		// We need to generate codegen indicating "count" of size.
+		if size != 0 {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+			count--
+			for count >= 3 {
+				n := 6
+				if n > count {
+					n = count
+				}
+				// Code 16: repeat previous length 3-6 times.
+				codegen[outIndex] = 16
+				outIndex++
+				codegen[outIndex] = uint8(n - 3)
+				outIndex++
+				w.codegenFreq[16]++
+				count -= n
+			}
+		} else {
+			for count >= 11 {
+				n := 138
+				if n > count {
+					n = count
+				}
+				// Code 18: repeat a zero length 11-138 times.
+				codegen[outIndex] = 18
+				outIndex++
+				codegen[outIndex] = uint8(n - 11)
+				outIndex++
+				w.codegenFreq[18]++
+				count -= n
+			}
+			if count >= 3 {
+				// count >= 3 && count <= 10
+				// Code 17: repeat a zero length 3-10 times.
+				codegen[outIndex] = 17
+				outIndex++
+				codegen[outIndex] = uint8(count - 3)
+				outIndex++
+				w.codegenFreq[17]++
+				count = 0
+			}
+		}
+		count--
+		for ; count >= 0; count-- {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+		}
+		// Set up invariant for next time through the loop.
+		size = nextSize
+		count = 1
+	}
+	// Marker indicating the end of the codegen.
+	codegen[outIndex] = badCode
+}
+
+/* non-inlined:
+func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
+	if w.err != nil {
+		return
+	}
+	c := code.codes[literal]
+	w.writeBits(int32(c.code()), int32(c.bits()))
+}
+*/
+
+// writeCode emits the Huffman code assigned to literal by the given encoder.
+// This is the manually-inlined version of the reference implementation shown
+// in the comment block above: bits accumulate LSB-first in w.bits and are
+// flushed to w.bytes six bytes at a time once at least 48 bits are pending.
+func (w *huffmanBitWriter) writeCode(code *huffmanEncoder, literal uint32) {
+	if w.err != nil {
+		return
+	}
+	c := code.codes[literal]
+	w.bits |= uint64(c.code()) << w.nbits
+	w.nbits += c.bits()
+	if w.nbits >= 48 {
+		// Drain the low 48 bits of the accumulator into the byte buffer.
+		bits := w.bits
+		w.bits >>= 48
+		w.nbits -= 48
+		n := w.nbytes
+		w.bytes[n] = byte(bits)
+		w.bytes[n+1] = byte(bits >> 8)
+		w.bytes[n+2] = byte(bits >> 16)
+		w.bytes[n+3] = byte(bits >> 24)
+		w.bytes[n+4] = byte(bits >> 32)
+		w.bytes[n+5] = byte(bits >> 40)
+		n += 6
+		if n >= bufferSize-8 {
+			// Buffer nearly full: hand the completed bytes to the
+			// underlying writer and restart filling at the front.
+			_, w.err = w.w.Write(w.bytes[:bufferSize-8])
+			n = 0
+		}
+		w.nbytes = n
+	}
+
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+//  numLiterals  The number of literals specified in codegen
+//  numOffsets   The number of offsets specified in codegen
+//  numCodegens  The number of codegens used in codegen
+// Write the header of a dynamic Huffman block to the output stream.
+//
+//  numLiterals  The number of literals specified in codegen
+//  numOffsets   The number of offsets specified in codegen
+//  numCodegens  The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	// 3-bit block header: low bit is the final-block flag, remaining two
+	// bits select the dynamic-Huffman block type (4 = 0b100, 5 = 0b101).
+	var firstBits int32 = 4
+	if isEof {
+		firstBits = 5
+	}
+	w.writeBits(firstBits, 3)
+	// HLIT (5 bits), HDIST (5 bits), HCLEN (4 bits), biased as per RFC 1951.
+	w.writeBits(int32(numLiterals-257), 5)
+	w.writeBits(int32(numOffsets-1), 5)
+	w.writeBits(int32(numCodegens-4), 4)
+
+	// Code lengths of the code-length alphabet, 3 bits each, in the
+	// fixed codegenOrder permutation.
+	for i := 0; i < numCodegens; i++ {
+		//value := w.codegenEncoding.codeBits[codegenOrder[i]]
+		value := w.codegenEncoding.codes[codegenOrder[i]].bits()
+		w.writeBits(int32(value), 3)
+	}
+
+	// Emit the run-length-encoded code lengths produced by generateCodegen.
+	// Codes 16/17/18 are repeat markers and carry 2/3/7 extra bits.
+	i := 0
+	for {
+		var codeWord int = int(w.codegen[i])
+		i++
+		if codeWord == badCode {
+			break
+		}
+		// The low byte contains the actual code to generate.
+		w.writeCode(w.codegenEncoding, uint32(codeWord))
+
+		// NOTE(review): the break statements below are redundant in Go;
+		// switch cases do not fall through.
+		switch codeWord {
+		case 16:
+			w.writeBits(int32(w.codegen[i]), 2)
+			i++
+			break
+		case 17:
+			w.writeBits(int32(w.codegen[i]), 3)
+			i++
+			break
+		case 18:
+			w.writeBits(int32(w.codegen[i]), 7)
+			i++
+			break
+		}
+	}
+}
+
+// writeStoredHeader writes the header of a stored (uncompressed) block:
+// the 3-bit block header (type 00, low bit = final flag), padding to a
+// byte boundary via flush, then LEN and its one's complement NLEN
+// (16 bits each) as required by RFC 1951 section 3.2.4.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	var flag int32
+	if isEof {
+		flag = 1
+	}
+	w.writeBits(flag, 3)
+	w.flush()
+	w.writeBits(int32(length), 16)
+	w.writeBits(int32(^uint16(length)), 16)
+}
+
+// writeFixedHeader writes the 3-bit header of a fixed-Huffman block
+// (2 = 0b010: type 01, not final; 3 = 0b011: type 01, final).
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+	if w.err != nil {
+		return
+	}
+	// Indicate that we are a fixed Huffman block
+	var value int32 = 2
+	if isEof {
+		value = 3
+	}
+	w.writeBits(value, 3)
+}
+
+// writeBlock encodes the given tokens as one DEFLATE block, choosing
+// whichever of stored / fixed-Huffman / dynamic-Huffman encoding yields
+// the smallest output. input, when non-nil, is the raw data the tokens
+// were produced from and enables the stored-block option.
+func (w *huffmanBitWriter) writeBlock(tok tokens, eof bool, input []byte) {
+	if w.err != nil {
+		return
+	}
+	// Clear the literal histogram (copy from an all-zero array).
+	copy(w.literalFreq, zeroLits[:])
+
+	for i := range w.offsetFreq {
+		w.offsetFreq[i] = 0
+	}
+
+	// Terminate the token stream with the end-of-block marker.
+	tok.tokens[tok.n] = endBlockMarker
+	tokens := tok.tokens[0 : tok.n+1]
+
+	// Build literal/length and offset frequency histograms.
+	for _, t := range tokens {
+		switch t.typ() {
+		case literalType:
+			w.literalFreq[t.literal()]++
+		case matchType:
+			length := t.length()
+			offset := t.offset()
+			w.literalFreq[lengthCodesStart+lengthCode(length)]++
+			w.offsetFreq[offsetCode(offset)]++
+		}
+	}
+
+	// get the number of literals
+	numLiterals := len(w.literalFreq)
+	for w.literalFreq[numLiterals-1] == 0 {
+		numLiterals--
+	}
+	// get the number of offsets
+	numOffsets := len(w.offsetFreq)
+	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+		numOffsets--
+	}
+	if numOffsets == 0 {
+		// We haven't found a single match. If we want to go with the dynamic encoding,
+		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
+		w.offsetFreq[0] = 1
+		numOffsets = 1
+	}
+
+	w.literalEncoding.generate(w.literalFreq, 15)
+	w.offsetEncoding.generate(w.offsetFreq, 15)
+
+	storedBytes := 0
+	if input != nil {
+		storedBytes = len(input)
+	}
+	var extraBits int64
+	var storedSize int64 = math.MaxInt64
+	if storedBytes <= maxStoreBlockSize && input != nil {
+		// Stored blocks cost 5 header bytes plus the raw data.
+		storedSize = int64((storedBytes + 5) * 8)
+		// We only bother calculating the costs of the extra bits required by
+		// the length of offset fields (which will be the same for both fixed
+		// and dynamic encoding), if we need to compare those two encodings
+		// against stored encoding.
+		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
+			// First eight length codes have extra size = 0.
+			extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
+		}
+		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
+			// First four offset codes have extra size = 0.
+			extraBits += int64(w.offsetFreq[offsetCode]) * int64(offsetExtraBits[offsetCode])
+		}
+	}
+
+	// Figure out smallest code.
+	// Fixed Huffman baseline.
+	var size = int64(3) +
+		fixedLiteralEncoding.bitLength(w.literalFreq) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq) +
+		extraBits
+	var literalEncoding = fixedLiteralEncoding
+	var offsetEncoding = fixedOffsetEncoding
+
+	// Dynamic Huffman?
+	var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicates how to encode
+	// the literalEncoding and the offsetEncoding.
+	w.generateCodegen(numLiterals, numOffsets)
+	w.codegenEncoding.generate(w.codegenFreq, 7)
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
+		w.codegenEncoding.bitLength(w.codegenFreq) +
+		int64(extraBits) +
+		int64(w.codegenFreq[16]*2) +
+		int64(w.codegenFreq[17]*3) +
+		int64(w.codegenFreq[18]*7)
+	dynamicSize := dynamicHeader +
+		w.literalEncoding.bitLength(w.literalFreq) +
+		w.offsetEncoding.bitLength(w.offsetFreq)
+
+	if dynamicSize < size {
+		size = dynamicSize
+		literalEncoding = w.literalEncoding
+		offsetEncoding = w.offsetEncoding
+	}
+
+	// Stored bytes?
+	if storedSize < size {
+		w.writeStoredHeader(storedBytes, eof)
+		w.writeBytes(input[0:storedBytes])
+		return
+	}
+
+	// Huffman.
+	if literalEncoding == fixedLiteralEncoding {
+		w.writeFixedHeader(eof)
+	} else {
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+	}
+	// NOTE(review): the break statements below are redundant in Go.
+	for _, t := range tokens {
+		switch t.typ() {
+		case literalType:
+			w.writeCode(literalEncoding, t.literal())
+			break
+		case matchType:
+			// Write the length
+			length := t.length()
+			lengthCode := lengthCode(length)
+			w.writeCode(literalEncoding, lengthCode+lengthCodesStart)
+			extraLengthBits := uint(lengthExtraBits[lengthCode])
+			if extraLengthBits > 0 {
+				extraLength := int32(length - lengthBase[lengthCode])
+				w.writeBits(extraLength, extraLengthBits)
+			}
+			// Write the offset
+			offset := t.offset()
+			offsetCode := offsetCode(offset)
+			w.writeCode(offsetEncoding, offsetCode)
+			extraOffsetBits := uint(offsetExtraBits[offsetCode])
+			if extraOffsetBits > 0 {
+				extraOffset := int32(offset - offsetBase[offsetCode])
+				w.writeBits(extraOffset, extraOffsetBits)
+			}
+			break
+		default:
+			// NOTE(review): string(t) converts the integer token to the
+			// rune with that code point, not its decimal representation;
+			// fmt.Sprintf("%d", t) or strconv would be correct here.
+			panic("unknown token type: " + string(t))
+		}
+	}
+}
+
+// huffOffset is a shared offset encoder for blocks that contain only
+// literals (see writeBlockHuff); it is built once at package init.
+var huffOffset *huffmanEncoder
+
+// zeroLits is an all-zero array used to clear literalFreq quickly via copy.
+var zeroLits [maxNumLit]int32
+
+func init() {
+	// Precompute the degenerate offset encoding: a single offset code 0
+	// is sufficient when a block contains no matches.
+	var w = newHuffmanBitWriter(nil)
+	w.offsetFreq[0] = 1
+	w.offsetEncoding = newHuffmanEncoder(offsetCodeCount)
+	w.offsetEncoding.generate(w.offsetFreq, 15)
+	huffOffset = w.offsetEncoding
+}
+
+// writeBlockHuff will write a block of bytes as either
+// Huffman encoded literals, or uncompressed bytes depending
+// on what yields the smallest result.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
+	if w.err != nil {
+		return
+	}
+	// Clear histogram
+	copy(w.literalFreq, zeroLits[:])
+
+	// Add everything as literals
+	histogram(input, w.literalFreq)
+
+	w.literalFreq[endBlockMarker]++
+
+	// get the number of literals
+	numLiterals := len(w.literalFreq)
+	for w.literalFreq[numLiterals-1] == 0 {
+		numLiterals--
+	}
+
+	// A single (dummy) offset code is enough for a literal-only block.
+	numOffsets := 1
+
+	w.literalEncoding.generate(w.literalFreq, 15)
+	// Reuse the precomputed degenerate offset encoding (see init above).
+	w.offsetEncoding = huffOffset
+
+	storedBytes := len(input)
+
+	var extraBits int64
+	var storedSize int64 = math.MaxInt64
+	if storedBytes <= maxStoreBlockSize {
+		storedSize = int64((storedBytes + 5) * 8)
+		// We only bother calculating the costs of the extra bits required by
+		// the length of offset fields (which will be the same for both fixed
+		// and dynamic encoding), if we need to compare those two encodings
+		// against stored encoding.
+		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
+			// First eight length codes have extra size = 0.
+			extraBits += int64(w.literalFreq[lengthCode]) * int64(lengthExtraBits[lengthCode-lengthCodesStart])
+		}
+	}
+
+	// Figure out smallest code.
+	// Always use dynamic Huffman or Store
+	var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicates how to encode
+	// the literalEncoding and the offsetEncoding.
+	w.generateCodegen(numLiterals, numOffsets)
+	w.codegenEncoding.generate(w.codegenFreq, 7)
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	dynamicHeader := int64(3+5+5+4+(3*numCodegens)) +
+		w.codegenEncoding.bitLength(w.codegenFreq) +
+		int64(extraBits) +
+		int64(w.codegenFreq[16]*2) +
+		int64(w.codegenFreq[17]*3) +
+		int64(w.codegenFreq[18]*7)
+	size := dynamicHeader +
+		w.literalEncoding.bitLength(w.literalFreq) +
+		1 /*w.offsetEncoding.bitLength(w.offsetFreq)*/
+
+	// Stored bytes?
+	if storedSize < size {
+		w.writeStoredHeader(storedBytes, eof)
+		w.writeBytes(input[0:storedBytes])
+		return
+	}
+
+	// Huffman.
+	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+	for _, t := range input {
+		// Bitwriting inlined, ~30% speedup
+		c := w.literalEncoding.codes[t]
+		w.bits |= uint64(c.code()) << w.nbits
+		w.nbits += c.bits()
+		if w.nbits >= 48 {
+			// Flush 48 accumulated bits to the byte buffer (same scheme
+			// as writeCode).
+			bits := w.bits
+			w.bits >>= 48
+			w.nbits -= 48
+			n := w.nbytes
+			w.bytes[n] = byte(bits)
+			w.bytes[n+1] = byte(bits >> 8)
+			w.bytes[n+2] = byte(bits >> 16)
+			w.bytes[n+3] = byte(bits >> 24)
+			w.bytes[n+4] = byte(bits >> 32)
+			w.bytes[n+5] = byte(bits >> 40)
+			n += 6
+			if n >= bufferSize-8 {
+				_, w.err = w.w.Write(w.bytes[:bufferSize-8])
+				w.nbytes = 0
+			} else {
+				w.nbytes = n
+			}
+		}
+	}
+	// Write EOB
+	w.writeCode(w.literalEncoding, endBlockMarker)
+}

+ 363 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/huffman_code.go

@@ -0,0 +1,363 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"sort"
+)
+
+// hcode packs a Huffman code and its bit length into one word:
+// the code value lives in the low 16 bits, the length in the bits above
+// (see the code/bits/set accessors below).
+type hcode uint32
+
+// huffmanEncoder assigns Huffman codes to symbols. The freqcache, bitCount
+// and sorter fields are scratch storage reused across generate calls to
+// avoid per-call allocations.
+type huffmanEncoder struct {
+	codes     []hcode
+	freqcache []literalNode
+	bitCount  [17]int32
+	lns       literalNodeSorter
+	lfs       literalFreqSorter
+}
+
+// literalNode pairs a symbol with its frequency while building the tree.
+type literalNode struct {
+	literal uint16
+	freq    int32
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level.  for better printing
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// codeBits unpacks both the code value and its bit length.
+func (h hcode) codeBits() (code uint16, bits uint8) {
+	return uint16(h), uint8(h >> 16)
+}
+
+// set stores a code value and bit length into h.
+func (h *hcode) set(code uint16, bits uint8) {
+	*h = hcode(code) | hcode(uint32(bits)<<16)
+}
+
+// setBits replaces only the bit-length field, keeping the code value.
+func (h *hcode) setBits(bits uint8) {
+	*h = hcode(*h&0xffff) | hcode(uint32(bits)<<16)
+}
+
+// toCode packs a code value and bit length into an hcode.
+func toCode(code uint16, bits uint8) hcode {
+	return hcode(code) | hcode(uint32(bits)<<16)
+}
+
+// code returns the code value (low 16 bits).
+func (h hcode) code() (code uint16) {
+	return uint16(h)
+}
+
+// bits returns the code's bit length.
+func (h hcode) bits() (bits uint) {
+	return uint(h >> 16)
+}
+
+// maxNode returns a sentinel node that sorts after every real node.
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }
+
+// newHuffmanEncoder returns an encoder with room for size symbols.
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	return &huffmanEncoder{codes: make([]hcode, size), freqcache: nil}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+// defined in RFC 1951 section 3.2.6.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(maxNumLit)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < maxNumLit; ch++ {
+		var bits uint16
+		var size uint8
+		// NOTE(review): the break statements below are redundant in Go.
+		switch {
+		case ch < 144:
+			// size 8, 00110000  .. 10111111
+			bits = ch + 48
+			size = 8
+			break
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+			break
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+			break
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		// DEFLATE codes are written LSB-first, so store them bit-reversed.
+		codes[ch] = toCode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+// generateFixedOffsetEncoding builds the fixed offset table of RFC 1951:
+// all 30 offset codes are 5 bits, stored bit-reversed for LSB-first output.
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := uint16(0); ch < 30; ch++ {
+		codes[ch] = toCode(reverseBits(ch, 5), 5)
+	}
+	return h
+}
+
+// Package-level singletons for the fixed (static) Huffman tables.
+var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
+var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()
+
+// bitLength returns the total number of bits needed to encode symbols with
+// the given frequencies using this encoder's current code assignment.
+func (h *huffmanEncoder) bitLength(freq []int32) int64 {
+	var total int64
+	for i, f := range freq {
+		if f != 0 {
+			total += int64(f) * int64(h.codes[i].bits())
+		}
+	}
+	return total
+}
+
+const maxBitsLimit = 16
+
+// Return the number of literals assigned to each bit size in the Huffman encoding
+//
+// This method is only called when list.length >= 3
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list  An array of the literals with non-zero frequencies
+//             and their associated frequencies.  The array is in order of increasing
+//             frequency, and has as its last element a special element with frequency
+//             MaxInt32
+// maxBits     The maximum number of bits that should be used to encode any literal.
+//             Must be less than 16.
+// return      An integer array in which array[i] indicates the number of literals
+//             that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what.  This
+	// saves a little bit of work in some small cases
+	if maxBits > n-1 {
+		maxBits = n - 1
+	}
+
+	// Create information about each of the levels.
+	// A bogus "Level 0" whose sole purpose is so that
+	// level1.prev.needed==0.  This makes level1.nextPairFreq
+	// be a legitimate value that never gets chosen.
+	var levels [maxBitsLimit]levelInfo
+	// leafCounts[i] counts the number of literals at the left
+	// of ancestors of the rightmost node at level i.
+	// leafCounts[i][j] is the number of literals at the left
+	// of the level j ancestor.
+	var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+	for level := int32(1); level <= maxBits; level++ {
+		// For every level, the first two items are the first two characters.
+		// We initialize the levels as if we had already figured this out.
+		levels[level] = levelInfo{
+			level:        level,
+			lastFreq:     list[1].freq,
+			nextCharFreq: list[2].freq,
+			nextPairFreq: list[0].freq + list[1].freq,
+		}
+		leafCounts[level][level] = 2
+		if level == 1 {
+			levels[level].nextPairFreq = math.MaxInt32
+		}
+	}
+
+	// We need a total of 2*n - 2 items at top level and have already generated 2.
+	levels[maxBits].needed = 2*n - 4
+
+	level := maxBits
+	for {
+		l := &levels[level]
+		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+			// We've run out of both leafs and pairs.
+			// End all calculations for this level.
+			// To make sure we never come back to this level or any lower level,
+			// set nextPairFreq impossibly large.
+			l.needed = 0
+			levels[level+1].nextPairFreq = math.MaxInt32
+			level++
+			continue
+		}
+
+		prevFreq := l.lastFreq
+		if l.nextCharFreq < l.nextPairFreq {
+			// The next item on this row is a leaf node.
+			n := leafCounts[level][level] + 1
+			l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node.
+			leafCounts[level][level] = n
+			l.nextCharFreq = list[n].freq
+		} else {
+			// The next item on this row is a pair from the previous row.
+			// nextPairFreq isn't valid until we generate two
+			// more values in the level below
+			l.lastFreq = l.nextPairFreq
+			// Take leaf counts from the lower level, except counts[level] remains the same.
+			copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			levels[l.level-1].needed = 2
+		}
+
+		if l.needed--; l.needed == 0 {
+			// We've done everything we need to do for this level.
+			// Continue calculating one level up.  Fill in nextPairFreq
+			// of that level with the sum of the two nodes we've just calculated on
+			// this level.
+			if l.level == maxBits {
+				// All done!
+				break
+			}
+			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+			level++
+		} else {
+			// If we stole from below, move down temporarily to replenish it.
+			for levels[level-1].needed > 0 {
+				level--
+			}
+		}
+	}
+
+	// Something is wrong if at the end, the top level is null or hasn't used
+	// all of the leaves.
+	if leafCounts[maxBits][maxBits] != n {
+		panic("leafCounts[maxBits][maxBits] != n")
+	}
+
+	// Reuse the encoder's scratch bitCount array instead of allocating.
+	bitCount := h.bitCount[:maxBits+1]
+	//make([]int32, maxBits+1)
+	bits := 1
+	counts := &leafCounts[maxBits]
+	for level := maxBits; level > 0; level-- {
+		// chain.leafCount gives the number of literals requiring at least "bits"
+		// bits to encode.
+		bitCount[bits] = counts[level] - counts[level-1]
+		bits++
+	}
+	return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+	code := uint16(0)
+	for n, bits := range bitCount {
+		code <<= 1
+		if n == 0 || bits == 0 {
+			continue
+		}
+		// The literals list[len(list)-bits] .. list[len(list)-1]
+		// are encoded using "bits" bits, and get the values
+		// code, code + 1, ....  The code values are
+		// assigned in literal order (not frequency order).
+		chunk := list[len(list)-int(bits):]
+
+		// Sort this chunk by literal value so codes are canonical.
+		h.lns.Sort(chunk)
+		for _, node := range chunk {
+			// Store bit-reversed for LSB-first DEFLATE output.
+			h.codes[node.literal] = toCode(reverseBits(code, uint8(n)), uint8(n))
+			code++
+		}
+		list = list[0 : len(list)-int(bits)]
+	}
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits  The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
+	// Lazily allocate the reusable node scratch buffer.
+	if h.freqcache == nil {
+		h.freqcache = make([]literalNode, 300)
+	}
+	list := h.freqcache[:len(freq)+1]
+	// Number of non-zero literals
+	count := 0
+	// Set list to be the set of all non-zero literals and their frequencies
+	for i, f := range freq {
+		if f != 0 {
+			list[count] = literalNode{uint16(i), f}
+			count++
+		} else {
+			list[count] = literalNode{}
+			//h.codeBits[i] = 0
+			h.codes[i].setBits(0)
+		}
+	}
+	list[len(freq)] = literalNode{}
+	// If freq[] is shorter than codeBits[], fill rest of codeBits[] with zeros
+	// FIXME: Doesn't do what it says on the tin (klauspost)
+	//h.codeBits = h.codeBits[0:len(freq)]
+
+	list = list[0:count]
+	if count <= 2 {
+		// Handle the small cases here, because they are awkward for the general case code.  With
+		// two or fewer literals, everything has bit length 1.
+		for i, node := range list {
+			// "list" is in order of increasing literal value.
+			h.codes[node.literal].set(uint16(i), 1)
+			//h.codeBits[node.literal] = 1
+			//h.code[node.literal] = uint16(i)
+		}
+		return
+	}
+	// Sort by frequency (ties broken by literal value) for bitCounts.
+	h.lfs.Sort(list)
+
+	// Get the number of literals for each bit count
+	bitCount := h.bitCounts(list, maxBits)
+	// And do the assignment
+	h.assignEncodingAndSize(bitCount, list)
+}
+
+// literalNodeSorter sorts literalNodes by literal value. It is embedded in
+// huffmanEncoder so the slice header can be reused without allocating.
+type literalNodeSorter []literalNode
+
+// Sort sorts a in place by literal value.
+func (s *literalNodeSorter) Sort(a []literalNode) {
+	*s = literalNodeSorter(a)
+	sort.Sort(s)
+}
+
+func (s literalNodeSorter) Len() int { return len(s) }
+
+func (s literalNodeSorter) Less(i, j int) bool {
+	return s[i].literal < s[j].literal
+}
+
+func (s literalNodeSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// literalFreqSorter sorts literalNodes by frequency, breaking ties by
+// literal value so the ordering is deterministic.
+type literalFreqSorter []literalNode
+
+// Sort sorts a in place by frequency (then literal value).
+func (s *literalFreqSorter) Sort(a []literalNode) {
+	*s = literalFreqSorter(a)
+	sort.Sort(s)
+}
+
+func (s literalFreqSorter) Len() int { return len(s) }
+
+func (s literalFreqSorter) Less(i, j int) bool {
+	if s[i].freq == s[j].freq {
+		return s[i].literal < s[j].literal
+	}
+	return s[i].freq < s[j].freq
+}
+
+func (s literalFreqSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

+ 846 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/inflate.go

@@ -0,0 +1,846 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run gen.go -output fixedhuff.go
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951.  The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+	"bufio"
+	"io"
+	"strconv"
+)
+
+const (
+	maxCodeLen = 16    // max length of Huffman code
+	maxHist    = 32768 // max history required
+	// The next three numbers come from the RFC section 3.2.7, with the
+	// additional proviso in section 3.2.5 which implies that distance codes
+	// 30 and 31 should never occur in compressed data.
+	maxNumLit  = 286
+	maxNumDist = 30
+	numCodes   = 19 // number of codes in Huffman meta-code
+)
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError int64
+
+func (e CorruptInputError) Error() string {
+	return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10)
+}
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+type ReadError struct {
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Read
+}
+
+func (e *ReadError) Error() string {
+	return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// A WriteError reports an error encountered while writing output.
+type WriteError struct {
+	Offset int64 // byte offset where error occurred
+	Err    error // error returned by underlying Write
+}
+
+func (e *WriteError) Error() string {
+	return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error()
+}
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// Note that much of the implementation of huffmanDecoder is also copied
+// into gen.go (in package main) for the purpose of precomputing the
+// fixed huffman tables so they can be included statically.
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits),
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits  = 9                      // width of the primary lookup table
+	huffmanNumChunks  = 1 << huffmanChunkBits  // entries in the primary table
+	huffmanCountMask  = 15                     // chunk & 15 is the code's bit count
+	huffmanValueShift = 4                      // chunk >> 4 is the decoded value / link index
+)
+
+// huffmanDecoder holds the chunked lookup tables described in the comment
+// block above (primary table plus overflow link tables for long codes).
+type huffmanDecoder struct {
+	min      int                      // the minimum code length
+	chunks   [huffmanNumChunks]uint32 // chunks as described above
+	links    [][]uint32               // overflow links
+	linkMask uint32                   // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(bits []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction.  It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	// Reset to the zero value if this decoder was used before.
+	if h.min != 0 {
+		*h = huffmanDecoder{}
+	}
+
+	// Count number of codes of each length,
+	// compute min and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range bits {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	// Compute the canonical first code of each length.
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i] = code
+		code += count[i]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings.  See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		return false
+	}
+
+	h.min = min
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		h.links = make([][]uint32, huffmanNumChunks-link)
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			// Codes are read LSB-first, so table indices are bit-reversed.
+			reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			h.links[off] = make([]uint32, numLinks)
+		}
+	}
+
+	// Populate the tables: each code of length n fills every slot whose
+	// low n bits equal the (bit-reversed) code.
+	for i, n := range bits {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint32(i<<huffmanValueShift | n)
+		reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk.  Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry.  Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// Decompress state. The step field implements a resumable state machine:
+// each step function advances decoding until it produces output (toRead)
+// or needs more input, so Read can return between steps.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+	woffset int64
+
+	// Input bits, in top of b.
+	b  uint32
+	nb uint
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	hist  *[maxHist]byte
+	hp    int  // current output position in buffer
+	hw    int  // have written hist[0:hw] already
+	hfull bool // buffer has filled at least once
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Next step in the decompression,
+	// and decompression state.
+	step     func(*decompressor)
+	final    bool
+	err      error
+	toRead   []byte
+	hl, hd   *huffmanDecoder
+	copyLen  int
+	copyDist int
+}
+
+// nextBlock reads the 3-bit DEFLATE block header (BFINAL + BTYPE) and
+// dispatches to the appropriate block decoder. If the previous block was
+// final, it flushes any remaining history and signals EOF.
+func (f *decompressor) nextBlock() {
+	if f.final {
+		if f.hw != f.hp {
+			// Unflushed output remains; emit it before reporting EOF.
+			f.flush((*decompressor).nextBlock)
+			return
+		}
+		f.err = io.EOF
+		return
+	}
+	for f.nb < 1+2 {
+		if f.err = f.moreBits(); f.err != nil {
+			return
+		}
+	}
+	f.final = f.b&1 == 1
+	f.b >>= 1
+	typ := f.b & 3
+	f.b >>= 2
+	f.nb -= 1 + 2
+	switch typ {
+	case 0:
+		// Stored (uncompressed) block.
+		f.dataBlock()
+	case 1:
+		// compressed, fixed Huffman tables
+		f.hl = &fixedHuffmanDecoder
+		f.hd = nil
+		f.huffmanBlock()
+	case 2:
+		// compressed, dynamic Huffman tables
+		if f.err = f.readHuffman(); f.err != nil {
+			break
+		}
+		f.hl = &f.h1
+		f.hd = &f.h2
+		f.huffmanBlock()
+	default:
+		// 3 is reserved.
+		f.err = CorruptInputError(f.roffset)
+	}
+}
+
+// Read implements io.Reader: it serves bytes already decoded into toRead,
+// and otherwise drives the step state machine until output or error.
+func (f *decompressor) Read(b []byte) (int, error) {
+	for {
+		if len(f.toRead) > 0 {
+			n := copy(b, f.toRead)
+			f.toRead = f.toRead[n:]
+			return n, nil
+		}
+		if f.err != nil {
+			return 0, f.err
+		}
+		f.step(f)
+	}
+}
+
+// Support the io.WriterTo interface for io.Copy and friends.
+// io.EOF is translated to a nil error with the byte total, per the
+// io.WriterTo contract.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	for {
+		if f.err != nil {
+			if f.err == io.EOF {
+				return total, nil
+			}
+			return total, f.err
+		}
+		if len(f.toRead) > 0 {
+			var n int
+			n, f.err = w.Write(f.toRead)
+			if f.err != nil {
+				return total, f.err
+			}
+			if n != len(f.toRead) {
+				return total, io.ErrShortWrite
+			}
+			f.toRead = f.toRead[:0]
+			total += int64(n)
+		}
+		f.step(f)
+	}
+}
+
+func (f *decompressor) Close() error {
+	if f.err == io.EOF {
+		return nil
+	}
+	return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+	// HLIT[5], HDIST[5], HCLEN[4].
+	for f.nb < 5+5+4 {
+		if err := f.moreBits(); err != nil {
+			return err
+		}
+	}
+	nlit := int(f.b&0x1F) + 257
+	if nlit > maxNumLit {
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	ndist := int(f.b&0x1F) + 1
+	if ndist > maxNumDist {
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	nclen := int(f.b&0xF) + 4
+	// numCodes is 19, so nclen is always valid.
+	f.b >>= 4
+	f.nb -= 5 + 5 + 4
+
+	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+	for i := 0; i < nclen; i++ {
+		for f.nb < 3 {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		f.codebits[codeOrder[i]] = int(f.b & 0x7)
+		f.b >>= 3
+		f.nb -= 3
+	}
+	for i := nclen; i < len(codeOrder); i++ {
+		f.codebits[codeOrder[i]] = 0
+	}
+	if !f.h1.init(f.codebits[0:]) {
+		return CorruptInputError(f.roffset)
+	}
+
+	// HLIT + 257 code lengths, HDIST + 1 code lengths,
+	// using the code length Huffman code.
+	for i, n := 0, nlit+ndist; i < n; {
+		x, err := f.huffSym(&f.h1)
+		if err != nil {
+			return err
+		}
+		if x < 16 {
+			// Actual length.
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<nb-1))
+		f.b >>= nb
+		f.nb -= nb
+		if i+rep > n {
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		return CorruptInputError(f.roffset)
+	}
+
+	// In order to preserve the property that we never read any extra bytes
+	// after the end of the DEFLATE stream, huffSym conservatively reads min
+	// bits at a time until it decodes the symbol. However, since every block
+	// must end with an EOB marker, we can use that as the minimum number of
+	// bits to read and guarantee we never read past the end of the stream.
+	if f.bits[endBlockMarker] > 0 {
+		f.h1.min = f.bits[endBlockMarker] // Length of EOB marker
+	}
+
+	return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively.  If hd == nil, the
+// fixed distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBlock() {
+	for {
+		v, err := f.huffSym(f.hl)
+		if err != nil {
+			f.err = err
+			return
+		}
+		var n uint // number of bits extra
+		var length int
+		switch {
+		case v < 256:
+			f.hist[f.hp] = byte(v)
+			f.hp++
+			if f.hp == len(f.hist) {
+				// After the flush, continue this loop.
+				f.flush((*decompressor).huffmanBlock)
+				return
+			}
+			continue
+		case v == 256:
+			// Done with huffman block; read next block.
+			f.step = (*decompressor).nextBlock
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+			n = 0
+		case v < 269:
+			length = v*2 - (265*2 - 11)
+			n = 1
+		case v < 273:
+			length = v*4 - (269*4 - 19)
+			n = 2
+		case v < 277:
+			length = v*8 - (273*8 - 35)
+			n = 3
+		case v < 281:
+			length = v*16 - (277*16 - 67)
+			n = 4
+		case v < 285:
+			length = v*32 - (281*32 - 131)
+			n = 5
+		case v < maxNumLit:
+			length = 258
+			n = 0
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+		if n > 0 {
+			for f.nb < n {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			length += int(f.b & uint32(1<<n-1))
+			f.b >>= n
+			f.nb -= n
+		}
+
+		var dist int
+		if f.hd == nil {
+			for f.nb < 5 {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			dist = int(reverseByte[(f.b&0x1F)<<3])
+			f.b >>= 5
+			f.nb -= 5
+		} else {
+			if dist, err = f.huffSym(f.hd); err != nil {
+				f.err = err
+				return
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << nb
+			for f.nb < nb {
+				if err = f.moreBits(); err != nil {
+					f.err = err
+					return
+				}
+			}
+			extra |= int(f.b & uint32(1<<nb-1))
+			f.b >>= nb
+			f.nb -= nb
+			dist = 1<<(nb+1) + 1 + extra
+		default:
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// Copy history[-dist:-dist+length] into output.
+		if dist > len(f.hist) {
+			f.err = InternalError("bad history distance")
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if !f.hfull && dist > f.hp {
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, dist
+		if f.copyHist() {
+			return
+		}
+	}
+}
+
+// copyHist copies f.copyLen bytes from f.hist (f.copyDist bytes ago) to itself.
+// It reports whether the f.hist buffer is full.
+func (f *decompressor) copyHist() bool {
+	p := f.hp - f.copyDist
+	if p < 0 {
+		p += len(f.hist)
+	}
+	for f.copyLen > 0 {
+		n := f.copyLen
+		if x := len(f.hist) - f.hp; n > x {
+			n = x
+		}
+		if x := len(f.hist) - p; n > x {
+			n = x
+		}
+		forwardCopy(f.hist[:], f.hp, p, n)
+		p += n
+		f.hp += n
+		f.copyLen -= n
+		if f.hp == len(f.hist) {
+			// After flush continue copying out of history.
+			f.flush((*decompressor).copyHuff)
+			return true
+		}
+		if p == len(f.hist) {
+			p = 0
+		}
+	}
+	return false
+}
+
+func (f *decompressor) copyHuff() {
+	if f.copyHist() {
+		return
+	}
+	f.huffmanBlock()
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard current half-byte.
+	f.nb = 0
+	f.b = 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[0:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = &ReadError{f.roffset, err}
+		return
+	}
+	n := int(f.buf[0]) | int(f.buf[1])<<8
+	nn := int(f.buf[2]) | int(f.buf[3])<<8
+	if uint16(nn) != uint16(^n) {
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		// 0-length block means sync
+		f.flush((*decompressor).nextBlock)
+		return
+	}
+
+	f.copyLen = n
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into f.hist.
+// It pauses for reads when f.hist is full.
+func (f *decompressor) copyData() {
+	n := f.copyLen
+	for n > 0 {
+		m := len(f.hist) - f.hp
+		if m > n {
+			m = n
+		}
+		m, err := io.ReadFull(f.r, f.hist[f.hp:f.hp+m])
+		f.roffset += int64(m)
+		if err != nil {
+			f.err = &ReadError{f.roffset, err}
+			return
+		}
+		n -= m
+		f.hp += m
+		if f.hp == len(f.hist) {
+			f.copyLen = n
+			f.flush((*decompressor).copyData)
+			return
+		}
+	}
+	f.step = (*decompressor).nextBlock
+}
+
+func (f *decompressor) setDict(dict []byte) {
+	if len(dict) > len(f.hist) {
+		// Will only remember the tail.
+		dict = dict[len(dict)-len(f.hist):]
+	}
+
+	f.hp = copy(f.hist[:], dict)
+	if f.hp == len(f.hist) {
+		f.hp = 0
+		f.hfull = true
+	}
+	f.hw = f.hp
+}
+
+func (f *decompressor) moreBits() error {
+	c, err := f.r.ReadByte()
+	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
+		return err
+	}
+	f.roffset++
+	f.b |= uint32(c) << f.nb
+	f.nb += 8
+	return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+	// with single element, huffSym must error on these two edge cases. In both
+	// cases, the chunks slice will be 0 for the invalid sequence, leading it
+	// to satisfy the n == 0 check below.
+	n := uint(h.min)
+	for {
+		for f.nb < n {
+			if err := f.moreBits(); err != nil {
+				return 0, err
+			}
+		}
+		chunk := h.chunks[f.b&(huffmanNumChunks-1)]
+		n = uint(chunk & huffmanCountMask)
+		if n > huffmanChunkBits {
+			chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask]
+			n = uint(chunk & huffmanCountMask)
+		}
+		if n <= f.nb {
+			if n == 0 {
+				f.err = CorruptInputError(f.roffset)
+				return 0, f.err
+			}
+			f.b >>= n
+			f.nb -= n
+			return int(chunk >> huffmanValueShift), nil
+		}
+	}
+}
+
+// Flush any buffered output to the underlying writer.
+func (f *decompressor) flush(step func(*decompressor)) {
+	f.toRead = f.hist[f.hw:f.hp]
+	f.woffset += int64(f.hp - f.hw)
+	f.hw = f.hp
+	if f.hp == len(f.hist) {
+		f.hp = 0
+		f.hw = 0
+		f.hfull = true
+	}
+	f.step = step
+}
+
+func makeReader(r io.Reader) Reader {
+	if rr, ok := r.(Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+	*f = decompressor{
+		r:        makeReader(r),
+		bits:     f.bits,
+		codebits: f.codebits,
+		hist:     f.hist,
+		step:     (*decompressor).nextBlock,
+	}
+	if dict != nil {
+		f.setDict(dict)
+	}
+	return nil
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+	var f decompressor
+	f.bits = new([maxNumLit + maxNumDist]int)
+	f.codebits = new([numCodes]int)
+	f.r = makeReader(r)
+	f.hist = new([maxHist]byte)
+	f.step = (*decompressor).nextBlock
+	return &f
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary.  The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read.  NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReaderDict also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+	var f decompressor
+	f.r = makeReader(r)
+	f.hist = new([maxHist]byte)
+	f.bits = new([maxNumLit + maxNumDist]int)
+	f.codebits = new([numCodes]int)
+	f.step = (*decompressor).nextBlock
+	f.setDict(dict)
+	return &f
+}

+ 48 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/reverse_bits.go

@@ -0,0 +1,48 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+var reverseByte = [256]byte{
+	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+}
+
+func reverseUint16(v uint16) uint16 {
+	return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return reverseUint16(number << uint8(16-bitLength))
+}

+ 97 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/snappy.go

@@ -0,0 +1,97 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// We limit how far copy back-references can go, the same as the C++ code.
+const maxOffset = 1 << 15
+
+// emitLiteral writes a literal chunk to dst.
+func emitLiteral(dst *tokens, lit []byte) {
+	ol := dst.n
+	for i, v := range lit {
+		dst.tokens[i+ol] = token(v)
+	}
+	dst.n += len(lit)
+}
+
+// emitCopy writes a copy chunk to dst.
+func emitCopy(dst *tokens, offset, length int) {
+	dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
+	dst.n++
+}
+
+// snappyEncode uses Snappy-like compression, but stores as Huffman
+// blocks.
+func snappyEncode(dst *tokens, src []byte) {
+	// Return early if src is short.
+	if len(src) <= 4 {
+		if len(src) != 0 {
+			emitLiteral(dst, src)
+		}
+		return
+	}
+
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	const maxTableSize = 1 << 14
+	shift, tableSize := uint(32-8), 1<<8
+	for tableSize < maxTableSize && tableSize < len(src) {
+		shift--
+		tableSize *= 2
+	}
+	var table [maxTableSize]int
+	var misses int
+	// Iterate over the source bytes.
+	var (
+		s   int // The iterator position.
+		t   int // The last position with the same hash as s.
+		lit int // The start position of any pending literal bytes.
+	)
+	for s+3 < len(src) {
+		// Update the hash table.
+		b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3]
+		h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24
+		p := &table[(h*0x1e35a7bd)>>shift]
+		// We need to store values in [-1, inf) in table. To save
+		// some initialization time, (re)use the table's zero value
+		// and shift the values against this zero: add 1 on writes,
+		// subtract 1 on reads.
+		t, *p = *p-1, s+1
+		// If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte.
+		if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] {
+			misses++
+			// Skip 1 byte for 16 consecutive misses.
+			s += 1 + (misses >> 4)
+			continue
+		}
+		// Otherwise, we have a match. First, emit any pending literal bytes.
+		if lit != s {
+			emitLiteral(dst, src[lit:s])
+		}
+		// Extend the match to be as long as possible.
+		s0 := s
+		s1 := s + maxMatchLength
+		if s1 > len(src) {
+			s1 = len(src)
+		}
+		s, t = s+4, t+4
+		for s < s1 && src[s] == src[t] {
+			s++
+			t++
+		}
+		misses = 0
+		// Emit the copied bytes.
+		// inlined: emitCopy(dst, s-t, s-s0)
+
+		dst.tokens[dst.n] = matchToken(uint32(s-s0-3), uint32(s-t-minOffsetSize))
+		dst.n++
+		lit = s
+	}
+
+	// Emit any final pending literal bytes and return.
+	if lit != len(src) {
+		emitLiteral(dst, src[lit:])
+	}
+}

+ 105 - 0
Godeps/_workspace/src/github.com/klauspost/compress/flate/token.go

@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+const (
+	// 2 bits:   type   0 = literal  1=EOF  2=Match   3=Unused
+	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
+	// 22 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
+	lengthShift = 22
+	offsetMask  = 1<<lengthShift - 1
+	typeMask    = 3 << 30
+	literalType = 0 << 30
+	matchType   = 1 << 30
+)
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [...]uint32{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 28,
+}
+
+var offsetCodes = [...]uint32{
+	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+type token uint32
+
+type tokens struct {
+	tokens []token
+	n      int
+}
+
+// Convert a literal into a literal token.
+func literalToken(literal uint32) token { return token(literalType + literal) }
+
+// Convert a < xlength, xoffset > pair into a match token.
+func matchToken(xlength uint32, xoffset uint32) token {
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint32 { return uint32(t - literalType) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
+
+func lengthCode(len uint32) uint32 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[off]
+	} else if off>>7 < uint32(len(offsetCodes)) {
+		return offsetCodes[off>>7] + 14
+	} else {
+		return offsetCodes[off>>14] + 28
+	}
+}

+ 342 - 0
Godeps/_workspace/src/github.com/klauspost/compress/gzip/gunzip.go

@@ -0,0 +1,342 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+	"bufio"
+	"errors"
+	"hash"
+	"io"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/crc32"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+func makeReader(r io.Reader) flate.Reader {
+	if rr, ok := r.(flate.Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = errors.New("gzip: invalid checksum")
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = errors.New("gzip: invalid header")
+)
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header.  Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return a ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum.  Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+	Header
+	r            flate.Reader
+	decompressor io.ReadCloser
+	digest       hash.Hash32
+	size         uint32
+	flg          byte
+	buf          [512]byte
+	err          error
+	multistream  bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the Reader when done.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+	z.r = makeReader(r)
+	z.multistream = true
+	z.digest = crc32.NewIEEE()
+	if err := z.readHeader(true); err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+	z.r = makeReader(r)
+	if z.digest == nil {
+		z.digest = crc32.NewIEEE()
+	} else {
+		z.digest.Reset()
+	}
+	z.size = 0
+	z.err = nil
+	z.multistream = true
+	return z.readHeader(true)
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+	z.multistream = ok
+}
+
+// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
+func get4(p []byte) uint32 {
+	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+}
+
+func (z *Reader) readString() (string, error) {
+	var err error
+	needconv := false
+	for i := 0; ; i++ {
+		if i >= len(z.buf) {
+			return "", ErrHeader
+		}
+		z.buf[i], err = z.r.ReadByte()
+		if err != nil {
+			return "", err
+		}
+		if z.buf[i] > 0x7f {
+			needconv = true
+		}
+		if z.buf[i] == 0 {
+			// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+			if needconv {
+				s := make([]rune, 0, i)
+				for _, v := range z.buf[0:i] {
+					s = append(s, rune(v))
+				}
+				return string(s), nil
+			}
+			return string(z.buf[0:i]), nil
+		}
+	}
+}
+
+func (z *Reader) read2() (uint32, error) {
+	_, err := io.ReadFull(z.r, z.buf[0:2])
+	if err != nil {
+		return 0, err
+	}
+	return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
+}
+
+func (z *Reader) readHeader(save bool) error {
+	_, err := io.ReadFull(z.r, z.buf[0:10])
+	if err != nil {
+		return err
+	}
+	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+		return ErrHeader
+	}
+	z.flg = z.buf[3]
+	if save {
+		z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0)
+		// z.buf[8] is xfl, ignored
+		z.OS = z.buf[9]
+	}
+	z.digest.Reset()
+	z.digest.Write(z.buf[0:10])
+
+	if z.flg&flagExtra != 0 {
+		n, err := z.read2()
+		if err != nil {
+			return err
+		}
+		data := make([]byte, n)
+		if _, err = io.ReadFull(z.r, data); err != nil {
+			return err
+		}
+		if save {
+			z.Extra = data
+		}
+	}
+
+	var s string
+	if z.flg&flagName != 0 {
+		if s, err = z.readString(); err != nil {
+			return err
+		}
+		if save {
+			z.Name = s
+		}
+	}
+
+	if z.flg&flagComment != 0 {
+		if s, err = z.readString(); err != nil {
+			return err
+		}
+		if save {
+			z.Comment = s
+		}
+	}
+
+	if z.flg&flagHdrCrc != 0 {
+		n, err := z.read2()
+		if err != nil {
+			return err
+		}
+		sum := z.digest.Sum32() & 0xFFFF
+		if n != sum {
+			return ErrHeader
+		}
+	}
+
+	z.digest.Reset()
+	if z.decompressor == nil {
+		z.decompressor = flate.NewReader(z.r)
+	} else {
+		z.decompressor.(flate.Resetter).Reset(z.r, nil)
+	}
+	return nil
+}
+
+func (z *Reader) Read(p []byte) (n int, err error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	n, err = z.decompressor.Read(p)
+	z.digest.Write(p[0:n])
+	z.size += uint32(n)
+	if n != 0 || err != io.EOF {
+		z.err = err
+		return
+	}
+
+	// Finished file; check checksum + size.
+	if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+		z.err = err
+		return 0, err
+	}
+	crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+	sum := z.digest.Sum32()
+	if sum != crc32 || isize != z.size {
+		z.err = ErrChecksum
+		return 0, z.err
+	}
+
+	// File is ok; is there another?
+	if !z.multistream {
+		return 0, io.EOF
+	}
+
+	if err = z.readHeader(false); err != nil {
+		z.err = err
+		return
+	}
+
+	// Yes.  Reset and read from it.
+	z.digest.Reset()
+	z.size = 0
+	return z.Read(p)
+}
+
+// Support the io.WriteTo interface for io.Copy and friends.
+func (z *Reader) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	for {
+		if z.err != nil {
+			if z.err == io.EOF {
+				return total, nil
+			}
+			return total, z.err
+		}
+
+		// We write both to output and digest.
+		mw := io.MultiWriter(w, z.digest)
+		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
+		total += n
+		z.size += uint32(n)
+		if err != nil {
+			z.err = err
+			return total, z.err
+		}
+
+		// Finished file; check checksum + size.
+		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+			z.err = err
+			return 0, err
+		}
+		crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8])
+		sum := z.digest.Sum32()
+		if sum != crc32 || isize != z.size {
+			z.err = ErrChecksum
+			return 0, z.err
+		}
+
+		// File is ok; is there another?
+		if !z.multistream {
+			return total, nil
+		}
+
+		err = z.readHeader(false)
+		// There were no more streams.
+		if err == io.EOF {
+			return total, nil
+		}
+		if err != nil {
+			z.err = err
+			return total, err
+		}
+
+		// Yes.  Reset and read from it.
+		z.digest.Reset()
+		z.size = 0
+	}
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+func (z *Reader) Close() error { return z.decompressor.Close() }

+ 274 - 0
Godeps/_workspace/src/github.com/klauspost/compress/gzip/gzip.go

@@ -0,0 +1,274 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gzip
+
+import (
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/crc32"
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+	NoCompression       = flate.NoCompression
+	BestSpeed           = flate.BestSpeed
+	BestCompression     = flate.BestCompression
+	DefaultCompression  = flate.DefaultCompression
+	ConstantCompression = flate.ConstantCompression
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+	Header
+	w           io.Writer
+	level       int
+	wroteHeader bool
+	compressor  *flate.Writer
+	digest      hash.Hash32
+	size        uint32
+	closed      bool
+	buf         [10]byte
+	err         error
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write or Close. The Comment and Name header fields are
+// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO
+// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an
+// error on Write.
+func NewWriter(w io.Writer) *Writer {
+	z, _ := NewWriterLevel(w, DefaultCompression)
+	return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be ConstantCompression, DefaultCompression,
+// NoCompression, or any integer value between BestSpeed and BestCompression
+// inclusive. The error returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+	if level < ConstantCompression || level > BestCompression {
+		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+	}
+	z := new(Writer)
+	z.init(w, level)
+	return z, nil
+}
+
// init resets z to write to w at the given compression level, reusing the
// existing CRC32 digest and flate compressor (when present) so a recycled
// Writer avoids reallocating them.
func (z *Writer) init(w io.Writer, level int) {
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}
	compressor := z.compressor
	if compressor != nil {
		// Repoint the existing compressor at the new destination.
		compressor.Reset(w)
	}
	// Overwrite all other state with zero values; only the reusable
	// digest and compressor survive the reset.
	*z = Writer{
		Header: Header{
			OS: 255, // unknown OS, per RFC 1952
		},
		w:          w,
		level:      level,
		digest:     digest,
		compressor: compressor,
	}
}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+	z.init(w, z.level)
+}
+
// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
// put2 stores v into p[0:2] in little-endian byte order.
func put2(p []byte, v uint16) {
	p[0] = byte(v)
	p[1] = byte(v >> 8)
}
+
// put4 stores v into p[0:4] in little-endian byte order (RFC 1952).
func put4(p []byte, v uint32) {
	p[0] = byte(v)
	p[1] = byte(v >> 8)
	p[2] = byte(v >> 16)
	p[3] = byte(v >> 24)
}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+	if len(b) > 0xffff {
+		return errors.New("gzip.Write: Extra data is too large")
+	}
+	put2(z.buf[0:2], uint16(len(b)))
+	_, err := z.w.Write(z.buf[0:2])
+	if err != nil {
+		return err
+	}
+	_, err = z.w.Write(b)
+	return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+	needconv := false
+	for _, v := range s {
+		if v == 0 || v > 0xff {
+			return errors.New("gzip.Write: non-Latin-1 header string")
+		}
+		if v > 0x7f {
+			needconv = true
+		}
+	}
+	if needconv {
+		b := make([]byte, 0, len(s))
+		for _, v := range s {
+			b = append(b, byte(v))
+		}
+		_, err = z.w.Write(b)
+	} else {
+		_, err = io.WriteString(z.w, s)
+	}
+	if err != nil {
+		return err
+	}
+	// GZIP strings are NUL-terminated.
+	z.buf[0] = 0
+	_, err = z.w.Write(z.buf[0:1])
+	return err
+}
+
// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed until the Writer is closed.
//
// On the first call it lazily emits the 10-byte GZIP header plus any
// optional Extra/Name/Comment fields, and creates the flate compressor.
func (z *Writer) Write(p []byte) (int, error) {
	if z.err != nil {
		return 0, z.err
	}
	var n int
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf[0] = gzipID1
		z.buf[1] = gzipID2
		z.buf[2] = gzipDeflate
		z.buf[3] = 0 // FLG byte: optional-field flags set below
		if z.Extra != nil {
			z.buf[3] |= 0x04 // FEXTRA
		}
		if z.Name != "" {
			z.buf[3] |= 0x08 // FNAME
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10 // FCOMMENT
		}
		// MTIME: modification time as Unix seconds, little-endian.
		put4(z.buf[4:8], uint32(z.ModTime.Unix()))
		// XFL byte: 2 = maximum compression, 4 = fastest, 0 otherwise.
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		} else {
			z.buf[8] = 0
		}
		z.buf[9] = z.OS
		n, z.err = z.w.Write(z.buf[0:10])
		if z.err != nil {
			return n, z.err
		}
		// Optional fields must follow the fixed header in this order:
		// extra, name, comment (RFC 1952).
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.compressor == nil {
			// NOTE(review): the error is discarded; presumably the level
			// was already validated by NewWriterLevel — confirm for
			// Writers constructed by other paths.
			z.compressor, _ = flate.NewWriter(z.w, z.level)
		}
	}
	// Track uncompressed size and CRC32 for the trailer written by Close.
	z.size += uint32(len(p))
	z.digest.Write(p)
	n, z.err = z.compressor.Write(p)
	return n, z.err
}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed {
+		return nil
+	}
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	z.err = z.compressor.Flush()
+	return z.err
+}
+
// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if z.err != nil {
		return z.err
	}
	if z.closed {
		// Close is idempotent after the first call.
		return nil
	}
	z.closed = true
	if !z.wroteHeader {
		// Ensure the header is emitted even for an empty stream.
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	z.err = z.compressor.Close()
	if z.err != nil {
		return z.err
	}
	// Trailer: CRC32 of the uncompressed data, then its length mod 2^32,
	// both little-endian (RFC 1952).
	put4(z.buf[0:4], z.digest.Sum32())
	put4(z.buf[4:8], z.size)
	_, z.err = z.w.Write(z.buf[0:8])
	return z.err
}

+ 24 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/.gitignore

@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof

+ 7 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/.travis.yml

@@ -0,0 +1,7 @@
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip

+ 22 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/LICENSE

@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+

+ 145 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/README.md

@@ -0,0 +1,145 @@
+# cpuid
+Package cpuid provides information about the CPU running the current program.
+
+CPU features are detected on startup, and kept for fast access through the life of the application.
+Currently x86 / x64 (AMD64) is supported, and no external C (cgo) code is used, which should make the library very easy to use.
+
+You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+
+Package home: https://github.com/klauspost/cpuid
+
+[![GoDoc][1]][2] [![Build Status][3]][4]
+
+[1]: https://godoc.org/github.com/klauspost/cpuid?status.svg
+[2]: https://godoc.org/github.com/klauspost/cpuid
+[3]: https://travis-ci.org/klauspost/cpuid.svg
+[4]: https://travis-ci.org/klauspost/cpuid
+
+# features
+## CPU Instructions
+*  **CMOV** (i686 CMOV)
+*  **NX** (NX (No-Execute) bit)
+*  **AMD3DNOW** (AMD 3DNOW)
+*  **AMD3DNOWEXT** (AMD 3DNowExt)
+*  **MMX** (standard MMX)
+*  **MMXEXT** (SSE integer functions or AMD MMX ext)
+*  **SSE** (SSE functions)
+*  **SSE2** (P4 SSE functions)
+*  **SSE3** (Prescott SSE3 functions)
+*  **SSSE3** (Conroe SSSE3 functions)
+*  **SSE4** (Penryn SSE4.1 functions)
+*  **SSE4A** (AMD Barcelona microarchitecture SSE4a instructions)
+*  **SSE42** (Nehalem SSE4.2 functions)
+*  **AVX** (AVX functions)
+*  **AVX2** (AVX2 functions)
+*  **FMA3** (Intel FMA 3)
+*  **FMA4** (Bulldozer FMA4 functions)
+*  **XOP** (Bulldozer XOP functions)
+*  **F16C** (Half-precision floating-point conversion)
+*  **BMI1** (Bit Manipulation Instruction Set 1)
+*  **BMI2** (Bit Manipulation Instruction Set 2)
+*  **TBM** (AMD Trailing Bit Manipulation)
+*  **LZCNT** (LZCNT instruction)
+*  **POPCNT** (POPCNT instruction)
+*  **AESNI** (Advanced Encryption Standard New Instructions)
+*  **CLMUL** (Carry-less Multiplication)
+*  **HTT** (Hyperthreading (enabled))
+*  **HLE** (Hardware Lock Elision)
+*  **RTM** (Restricted Transactional Memory)
+*  **RDRAND** (RDRAND instruction is available)
+*  **RDSEED** (RDSEED instruction is available)
+*  **ADX** (Intel ADX (Multi-Precision Add-Carry Instruction Extensions))
+*  **SHA** (Intel SHA Extensions)
+*  **AVX512F** (AVX-512 Foundation)
+*  **AVX512DQ** (AVX-512 Doubleword and Quadword Instructions)
+*  **AVX512IFMA** (AVX-512 Integer Fused Multiply-Add Instructions)
+*  **AVX512PF** (AVX-512 Prefetch Instructions)
+*  **AVX512ER** (AVX-512 Exponential and Reciprocal Instructions)
+*  **AVX512CD** (AVX-512 Conflict Detection Instructions)
+*  **AVX512BW** (AVX-512 Byte and Word Instructions)
+*  **AVX512VL** (AVX-512 Vector Length Extensions)
+*  **AVX512VBMI** (AVX-512 Vector Bit Manipulation Instructions)
+*  **MPX** (Intel MPX (Memory Protection Extensions))
+*  **ERMS** (Enhanced REP MOVSB/STOSB)
+*  **RDTSCP** (RDTSCP Instruction)
+*  **CX16** (CMPXCHG16B Instruction)
+*  **SGX** (Software Guard Extensions, with activation details)
+
+## Performance
+*  **RDTSCP()** Returns current cycle count. Can be used for benchmarking.
+*  **SSE2SLOW** (SSE2 is supported, but usually not faster)
+*  **SSE3SLOW** (SSE3 is supported, but usually not faster)
+*  **ATOM** (Atom processor, some SSSE3 instructions are slower)
+*  **Cache line** (Probable size of a cache line).
+*  **L1, L2, L3 Cache size** on newer Intel/AMD CPUs.
+
+## Cpu Vendor/VM
+* **Intel**
+* **AMD**
+* **VIA**
+* **Transmeta**
+* **NSC**
+* **KVM**  (Kernel-based Virtual Machine)
+* **MSVM** (Microsoft Hyper-V or Windows Virtual PC)
+* **VMware**
+* **XenHVM**
+
+# installing
+
+```go get github.com/klauspost/cpuid```
+
+# example
+
+```Go
+package main
+
+import (
+	"fmt"
+	"github.com/klauspost/cpuid"
+)
+
+func main() {
+	// Print basic CPU information:
+	fmt.Println("Name:", cpuid.CPU.BrandName)
+	fmt.Println("PhysicalCores:", cpuid.CPU.PhysicalCores)
+	fmt.Println("ThreadsPerCore:", cpuid.CPU.ThreadsPerCore)
+	fmt.Println("LogicalCores:", cpuid.CPU.LogicalCores)
+	fmt.Println("Family", cpuid.CPU.Family, "Model:", cpuid.CPU.Model)
+	fmt.Println("Features:", cpuid.CPU.Features)
+	fmt.Println("Cacheline bytes:", cpuid.CPU.CacheLine)
+	fmt.Println("L1 Data Cache:", cpuid.CPU.Cache.L1D, "bytes")
+	fmt.Println("L1 Instruction Cache:", cpuid.CPU.Cache.L1I, "bytes")
+	fmt.Println("L2 Cache:", cpuid.CPU.Cache.L2, "bytes")
+	fmt.Println("L3 Cache:", cpuid.CPU.Cache.L3, "bytes")
+
+	// Test if we have a specific feature:
+	if cpuid.CPU.SSE() {
+		fmt.Println("We have Streaming SIMD Extensions")
+	}
+}
+```
+
+Sample output:
+```
+>go run main.go
+Name: Intel(R) Core(TM) i5-2540M CPU @ 2.60GHz
+PhysicalCores: 2
+ThreadsPerCore: 2
+LogicalCores: 4
+Family 6 Model: 42
+Features: CMOV,MMX,MMXEXT,SSE,SSE2,SSE3,SSSE3,SSE4.1,SSE4.2,AVX,AESNI,CLMUL
+Cacheline bytes: 64
+We have Streaming SIMD Extensions
+```
+
+# private package
+
+In the "private" folder you can find an autogenerated version of the library you can include in your own packages.
+
+For this purpose all exports are removed, and functions and constants are lowercased.
+
+This is not a recommended way of using the library, but provided for convenience, if it is difficult for you to use external packages.
+
+# license
+
+This code is published under an MIT license. See LICENSE file for more information.

+ 1022 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid.go

@@ -0,0 +1,1022 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// Package cpuid provides information about the CPU running the current program.
+//
+// CPU features are detected on startup, and kept for fast access through the life of the application.
+// Currently x86 / x64 (AMD64) is supported.
+//
+// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
+//
+// Package home: https://github.com/klauspost/cpuid
+package cpuid
+
+import "strings"
+
+// Vendor is a representation of a CPU vendor.
+type Vendor int
+
+const (
+	Other Vendor = iota
+	Intel
+	AMD
+	VIA
+	Transmeta
+	NSC
+	KVM  // Kernel-based Virtual Machine
+	MSVM // Microsoft Hyper-V or Windows Virtual PC
+	VMware
+	XenHVM
+)
+
+const (
+	CMOV        = 1 << iota // i686 CMOV
+	NX                      // NX (No-Execute) bit
+	AMD3DNOW                // AMD 3DNOW
+	AMD3DNOWEXT             // AMD 3DNowExt
+	MMX                     // standard MMX
+	MMXEXT                  // SSE integer functions or AMD MMX ext
+	SSE                     // SSE functions
+	SSE2                    // P4 SSE functions
+	SSE3                    // Prescott SSE3 functions
+	SSSE3                   // Conroe SSSE3 functions
+	SSE4                    // Penryn SSE4.1 functions
+	SSE4A                   // AMD Barcelona microarchitecture SSE4a instructions
+	SSE42                   // Nehalem SSE4.2 functions
+	AVX                     // AVX functions
+	AVX2                    // AVX2 functions
+	FMA3                    // Intel FMA 3
+	FMA4                    // Bulldozer FMA4 functions
+	XOP                     // Bulldozer XOP functions
+	F16C                    // Half-precision floating-point conversion
+	BMI1                    // Bit Manipulation Instruction Set 1
+	BMI2                    // Bit Manipulation Instruction Set 2
+	TBM                     // AMD Trailing Bit Manipulation
+	LZCNT                   // LZCNT instruction
+	POPCNT                  // POPCNT instruction
+	AESNI                   // Advanced Encryption Standard New Instructions
+	CLMUL                   // Carry-less Multiplication
+	HTT                     // Hyperthreading (enabled)
+	HLE                     // Hardware Lock Elision
+	RTM                     // Restricted Transactional Memory
+	RDRAND                  // RDRAND instruction is available
+	RDSEED                  // RDSEED instruction is available
+	ADX                     // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	SHA                     // Intel SHA Extensions
+	AVX512F                 // AVX-512 Foundation
+	AVX512DQ                // AVX-512 Doubleword and Quadword Instructions
+	AVX512IFMA              // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF                // AVX-512 Prefetch Instructions
+	AVX512ER                // AVX-512 Exponential and Reciprocal Instructions
+	AVX512CD                // AVX-512 Conflict Detection Instructions
+	AVX512BW                // AVX-512 Byte and Word Instructions
+	AVX512VL                // AVX-512 Vector Length Extensions
+	AVX512VBMI              // AVX-512 Vector Bit Manipulation Instructions
+	MPX                     // Intel MPX (Memory Protection Extensions)
+	ERMS                    // Enhanced REP MOVSB/STOSB
+	RDTSCP                  // RDTSCP Instruction
+	CX16                    // CMPXCHG16B Instruction
+	SGX                     // Software Guard Extensions
+
+	// Performance indicators
+	SSE2SLOW // SSE2 is supported, but usually not faster
+	SSE3SLOW // SSE3 is supported, but usually not faster
+	ATOM     // Atom processor, some SSSE3 instructions are slower
+)
+
+var flagNames = map[Flags]string{
+	CMOV:        "CMOV",        // i686 CMOV
+	NX:          "NX",          // NX (No-Execute) bit
+	AMD3DNOW:    "AMD3DNOW",    // AMD 3DNOW
+	AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt
+	MMX:         "MMX",         // Standard MMX
+	MMXEXT:      "MMXEXT",      // SSE integer functions or AMD MMX ext
+	SSE:         "SSE",         // SSE functions
+	SSE2:        "SSE2",        // P4 SSE2 functions
+	SSE3:        "SSE3",        // Prescott SSE3 functions
+	SSSE3:       "SSSE3",       // Conroe SSSE3 functions
+	SSE4:        "SSE4.1",      // Penryn SSE4.1 functions
+	SSE4A:       "SSE4A",       // AMD Barcelona microarchitecture SSE4a instructions
+	SSE42:       "SSE4.2",      // Nehalem SSE4.2 functions
+	AVX:         "AVX",         // AVX functions
+	AVX2:        "AVX2",        // AVX functions
+	FMA3:        "FMA3",        // Intel FMA 3
+	FMA4:        "FMA4",        // Bulldozer FMA4 functions
+	XOP:         "XOP",         // Bulldozer XOP functions
+	F16C:        "F16C",        // Half-precision floating-point conversion
+	BMI1:        "BMI1",        // Bit Manipulation Instruction Set 1
+	BMI2:        "BMI2",        // Bit Manipulation Instruction Set 2
+	TBM:         "TBM",         // AMD Trailing Bit Manipulation
+	LZCNT:       "LZCNT",       // LZCNT instruction
+	POPCNT:      "POPCNT",      // POPCNT instruction
+	AESNI:       "AESNI",       // Advanced Encryption Standard New Instructions
+	CLMUL:       "CLMUL",       // Carry-less Multiplication
+	HTT:         "HTT",         // Hyperthreading (enabled)
+	HLE:         "HLE",         // Hardware Lock Elision
+	RTM:         "RTM",         // Restricted Transactional Memory
+	RDRAND:      "RDRAND",      // RDRAND instruction is available
+	RDSEED:      "RDSEED",      // RDSEED instruction is available
+	ADX:         "ADX",         // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	SHA:         "SHA",         // Intel SHA Extensions
+	AVX512F:     "AVX512F",     // AVX-512 Foundation
+	AVX512DQ:    "AVX512DQ",    // AVX-512 Doubleword and Quadword Instructions
+	AVX512IFMA:  "AVX512IFMA",  // AVX-512 Integer Fused Multiply-Add Instructions
+	AVX512PF:    "AVX512PF",    // AVX-512 Prefetch Instructions
+	AVX512ER:    "AVX512ER",    // AVX-512 Exponential and Reciprocal Instructions
+	AVX512CD:    "AVX512CD",    // AVX-512 Conflict Detection Instructions
+	AVX512BW:    "AVX512BW",    // AVX-512 Byte and Word Instructions
+	AVX512VL:    "AVX512VL",    // AVX-512 Vector Length Extensions
+	AVX512VBMI:  "AVX512VBMI",  // AVX-512 Vector Bit Manipulation Instructions
+	MPX:         "MPX",         // Intel MPX (Memory Protection Extensions)
+	ERMS:        "ERMS",        // Enhanced REP MOVSB/STOSB
+	RDTSCP:      "RDTSCP",      // RDTSCP Instruction
+	CX16:        "CX16",        // CMPXCHG16B Instruction
+	SGX:         "SGX",         // Software Guard Extensions
+
+	// Performance indicators
+	SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster
+	SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster
+	ATOM:     "ATOM",     // Atom processor, some SSSE3 instructions are slower
+
+}
+
// CPUInfo contains information about the detected system CPU.
type CPUInfo struct {
	BrandName      string // Brand name reported by the CPU
	VendorID       Vendor // Comparable CPU vendor ID
	Features       Flags  // Features of the CPU
	PhysicalCores  int    // Number of physical processor cores in your CPU. Will be 0 if undetectable.
	ThreadsPerCore int    // Number of threads per physical core. Will be 1 if undetectable.
	LogicalCores   int    // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
	Family         int    // CPU family number
	Model          int    // CPU model number
	CacheLine      int    // Cache line size in bytes. Will be 0 if undetectable.
	Cache          struct {
		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
		L3  int // L3 Cache (per core or shared). Will be -1 if undetected
	}
	SGX       SGXSupport
	maxFunc   uint32 // highest supported standard CPUID leaf
	maxExFunc uint32 // highest supported extended CPUID leaf
}
+
// Architecture-specific CPUID primitives; presumably assigned by initCPU
// (defined elsewhere) — confirm against the platform-specific files.
var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
var xgetbv func(index uint32) (eax, edx uint32)
var rdtscpAsm func() (eax, ebx, ecx, edx uint32)

// CPU contains information about the CPU as detected on startup,
// or when Detect last was called.
//
// Use this as the primary entry point to your data.
var CPU CPUInfo

func init() {
	initCPU()
	Detect()
}
+
// Detect will re-detect current CPU info.
// This will replace the content of the exported CPU variable.
//
// Unless you expect the CPU to change while you are running your program
// you should not need to call this function.
// If you call this, you must ensure that no other goroutine is accessing the
// exported CPU variable.
func Detect() {
	// Query the maximum leaf IDs first; the helpers below consult them
	// to decide which CPUID leaves are safe to read.
	CPU.maxFunc = maxFunctionID()
	CPU.maxExFunc = maxExtendedFunction()
	CPU.BrandName = brandName()
	CPU.CacheLine = cacheLine()
	CPU.Family, CPU.Model = familyModel()
	CPU.Features = support()
	CPU.SGX = sgx(CPU.Features&SGX != 0)
	CPU.ThreadsPerCore = threadsPerCore()
	CPU.LogicalCores = logicalCores()
	CPU.PhysicalCores = physicalCores()
	CPU.VendorID = vendorID()
	CPU.cacheSize()
}
+
+// Generated here: http://play.golang.org/p/BxFH2Gdc0G
+
+// Cmov indicates support of CMOV instructions
+func (c CPUInfo) Cmov() bool {
+	return c.Features&CMOV != 0
+}
+
+// Amd3dnow indicates support of AMD 3DNOW! instructions
+func (c CPUInfo) Amd3dnow() bool {
+	return c.Features&AMD3DNOW != 0
+}
+
+// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
+func (c CPUInfo) Amd3dnowExt() bool {
+	return c.Features&AMD3DNOWEXT != 0
+}
+
+// MMX indicates support of MMX instructions
+func (c CPUInfo) MMX() bool {
+	return c.Features&MMX != 0
+}
+
+// MMXExt indicates support of MMXEXT instructions
+// (SSE integer functions or AMD MMX ext)
+func (c CPUInfo) MMXExt() bool {
+	return c.Features&MMXEXT != 0
+}
+
+// SSE indicates support of SSE instructions
+func (c CPUInfo) SSE() bool {
+	return c.Features&SSE != 0
+}
+
+// SSE2 indicates support of SSE 2 instructions
+func (c CPUInfo) SSE2() bool {
+	return c.Features&SSE2 != 0
+}
+
+// SSE3 indicates support of SSE 3 instructions
+func (c CPUInfo) SSE3() bool {
+	return c.Features&SSE3 != 0
+}
+
+// SSSE3 indicates support of SSSE 3 instructions
+func (c CPUInfo) SSSE3() bool {
+	return c.Features&SSSE3 != 0
+}
+
+// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
+func (c CPUInfo) SSE4() bool {
+	return c.Features&SSE4 != 0
+}
+
+// SSE42 indicates support of SSE4.2 instructions
+func (c CPUInfo) SSE42() bool {
+	return c.Features&SSE42 != 0
+}
+
+// AVX indicates support of AVX instructions
+// and operating system support of AVX instructions
+func (c CPUInfo) AVX() bool {
+	return c.Features&AVX != 0
+}
+
+// AVX2 indicates support of AVX2 instructions
+func (c CPUInfo) AVX2() bool {
+	return c.Features&AVX2 != 0
+}
+
+// FMA3 indicates support of FMA3 instructions
+func (c CPUInfo) FMA3() bool {
+	return c.Features&FMA3 != 0
+}
+
+// FMA4 indicates support of FMA4 instructions
+func (c CPUInfo) FMA4() bool {
+	return c.Features&FMA4 != 0
+}
+
+// XOP indicates support of XOP instructions
+func (c CPUInfo) XOP() bool {
+	return c.Features&XOP != 0
+}
+
+// F16C indicates support of F16C instructions
+func (c CPUInfo) F16C() bool {
+	return c.Features&F16C != 0
+}
+
+// BMI1 indicates support of BMI1 instructions
+func (c CPUInfo) BMI1() bool {
+	return c.Features&BMI1 != 0
+}
+
+// BMI2 indicates support of BMI2 instructions
+func (c CPUInfo) BMI2() bool {
+	return c.Features&BMI2 != 0
+}
+
+// TBM indicates support of TBM instructions
+// (AMD Trailing Bit Manipulation)
+func (c CPUInfo) TBM() bool {
+	return c.Features&TBM != 0
+}
+
+// Lzcnt indicates support of LZCNT instruction
+func (c CPUInfo) Lzcnt() bool {
+	return c.Features&LZCNT != 0
+}
+
+// Popcnt indicates support of POPCNT instruction
+func (c CPUInfo) Popcnt() bool {
+	return c.Features&POPCNT != 0
+}
+
+// HTT indicates the processor has Hyperthreading enabled
+func (c CPUInfo) HTT() bool {
+	return c.Features&HTT != 0
+}
+
+// SSE2Slow indicates that SSE2 may be slow on this processor
+func (c CPUInfo) SSE2Slow() bool {
+	return c.Features&SSE2SLOW != 0
+}
+
+// SSE3Slow indicates that SSE3 may be slow on this processor
+func (c CPUInfo) SSE3Slow() bool {
+	return c.Features&SSE3SLOW != 0
+}
+
+// AesNi indicates support of AES-NI instructions
+// (Advanced Encryption Standard New Instructions)
+func (c CPUInfo) AesNi() bool {
+	return c.Features&AESNI != 0
+}
+
+// Clmul indicates support of CLMUL instructions
+// (Carry-less Multiplication)
+func (c CPUInfo) Clmul() bool {
+	return c.Features&CLMUL != 0
+}
+
+// NX indicates support of NX (No-Execute) bit
+func (c CPUInfo) NX() bool {
+	return c.Features&NX != 0
+}
+
+// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
+func (c CPUInfo) SSE4A() bool {
+	return c.Features&SSE4A != 0
+}
+
+// HLE indicates support of Hardware Lock Elision
+func (c CPUInfo) HLE() bool {
+	return c.Features&HLE != 0
+}
+
+// RTM indicates support of Restricted Transactional Memory
+func (c CPUInfo) RTM() bool {
+	return c.Features&RTM != 0
+}
+
+// Rdrand indicates support of RDRAND instruction is available
+func (c CPUInfo) Rdrand() bool {
+	return c.Features&RDRAND != 0
+}
+
+// Rdseed indicates support of RDSEED instruction is available
+func (c CPUInfo) Rdseed() bool {
+	return c.Features&RDSEED != 0
+}
+
+// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+func (c CPUInfo) ADX() bool {
+	return c.Features&ADX != 0
+}
+
+// SHA indicates support of Intel SHA Extensions
+func (c CPUInfo) SHA() bool {
+	return c.Features&SHA != 0
+}
+
+// AVX512F indicates support of AVX-512 Foundation
+func (c CPUInfo) AVX512F() bool {
+	return c.Features&AVX512F != 0
+}
+
+// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
+func (c CPUInfo) AVX512DQ() bool {
+	return c.Features&AVX512DQ != 0
+}
+
+// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
+func (c CPUInfo) AVX512IFMA() bool {
+	return c.Features&AVX512IFMA != 0
+}
+
+// AVX512PF indicates support of AVX-512 Prefetch Instructions
+func (c CPUInfo) AVX512PF() bool {
+	return c.Features&AVX512PF != 0
+}
+
+// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
+func (c CPUInfo) AVX512ER() bool {
+	return c.Features&AVX512ER != 0
+}
+
+// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
+func (c CPUInfo) AVX512CD() bool {
+	return c.Features&AVX512CD != 0
+}
+
+// AVX512BW indicates support of AVX-512 Byte and Word Instructions
+func (c CPUInfo) AVX512BW() bool {
+	return c.Features&AVX512BW != 0
+}
+
+// AVX512VL indicates support of AVX-512 Vector Length Extensions
+func (c CPUInfo) AVX512VL() bool {
+	return c.Features&AVX512VL != 0
+}
+
+// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
+func (c CPUInfo) AVX512VBMI() bool {
+	return c.Features&AVX512VBMI != 0
+}
+
+// MPX indicates support of Intel MPX (Memory Protection Extensions)
+func (c CPUInfo) MPX() bool {
+	return c.Features&MPX != 0
+}
+
+// ERMS indicates support of Enhanced REP MOVSB/STOSB
+func (c CPUInfo) ERMS() bool {
+	return c.Features&ERMS != 0
+}
+
// RDTSCP indicates support of the RDTSCP instruction
func (c CPUInfo) RDTSCP() bool {
	return c.Features&RDTSCP != 0
}

// CX16 indicates support of the CMPXCHG16B instruction
func (c CPUInfo) CX16() bool {
	return c.Features&CX16 != 0
}
+
+// Atom indicates an Atom processor
+func (c CPUInfo) Atom() bool {
+	return c.Features&ATOM != 0
+}
+
+// Intel returns true if vendor is recognized as Intel
+func (c CPUInfo) Intel() bool {
+	return c.VendorID == Intel
+}
+
+// AMD returns true if vendor is recognized as AMD
+func (c CPUInfo) AMD() bool {
+	return c.VendorID == AMD
+}
+
+// Transmeta returns true if vendor is recognized as Transmeta
+func (c CPUInfo) Transmeta() bool {
+	return c.VendorID == Transmeta
+}
+
+// NSC returns true if vendor is recognized as National Semiconductor
+func (c CPUInfo) NSC() bool {
+	return c.VendorID == NSC
+}
+
+// VIA returns true if vendor is recognized as VIA
+func (c CPUInfo) VIA() bool {
+	return c.VendorID == VIA
+}
+
+// RTCounter returns the 64-bit time-stamp counter
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c CPUInfo) RTCounter() uint64 {
+	if !c.RDTSCP() {
+		return 0
+	}
+	a, _, _, d := rdtscpAsm()
+	return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c CPUInfo) Ia32TscAux() uint32 {
+	if !c.RDTSCP() {
+		return 0
+	}
+	_, _, ecx, _ := rdtscpAsm()
+	return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c CPUInfo) LogicalCPU() int {
+	if c.maxFunc < 1 {
+		return -1
+	}
+	_, ebx, _, _ := cpuid(1)
+	return int(ebx >> 24)
+}
+
+// VM Will return true if the cpu id indicates we are in
+// a virtual machine. This is only a hint, and will very likely
+// have many false negatives.
+func (c CPUInfo) VM() bool {
+	switch c.VendorID {
+	case MSVM, KVM, VMware, XenHVM:
+		return true
+	}
+	return false
+}
+
// Flags contains detected cpu features and characteristics
type Flags uint64

// String returns a comma-separated string representation of the detected
// CPU features.
func (f Flags) String() string {
	return strings.Join(f.Strings(), ",")
}
+
+// Strings returns an array of the detected features.
+func (f Flags) Strings() []string {
+	// Bug fix: this previously called support(), re-detecting the host
+	// CPU's flags and ignoring the receiver entirely — Strings() on any
+	// Flags value would report the machine's features. Use f instead.
+	r := make([]string, 0, 20)
+	for i := uint(0); i < 64; i++ {
+		key := Flags(1 << i)
+		val := flagNames[key]
+		if f&key != 0 {
+			r = append(r, val)
+		}
+	}
+	return r
+}
+
+// maxExtendedFunction returns the highest supported extended CPUID leaf
+// (leaf 0x80000000, EAX).
+func maxExtendedFunction() uint32 {
+	eax, _, _, _ := cpuid(0x80000000)
+	return eax
+}
+
+// maxFunctionID returns the highest supported standard CPUID leaf
+// (leaf 0, EAX).
+func maxFunctionID() uint32 {
+	a, _, _, _ := cpuid(0)
+	return a
+}
+
+// brandName returns the 48-byte processor brand string read from extended
+// leaves 0x80000002..0x80000004, or "unknown" when unsupported.
+func brandName() string {
+	if maxExtendedFunction() >= 0x80000004 {
+		v := make([]uint32, 0, 48)
+		// Three leaves, four registers each: 12 uint32s = 48 bytes.
+		for i := uint32(0); i < 3; i++ {
+			a, b, c, d := cpuid(0x80000002 + i)
+			v = append(v, a, b, c, d)
+		}
+		return strings.Trim(string(valAsString(v...)), " ")
+	}
+	return "unknown"
+}
+
+// threadsPerCore returns the number of hardware threads (SMT) per physical
+// core. Only Intel CPUs are inspected; other vendors report 1.
+func threadsPerCore() int {
+	mfi := maxFunctionID()
+	if mfi < 0x4 || vendorID() != Intel {
+		return 1
+	}
+
+	if mfi < 0xb {
+		// No topology leaf: derive from leaf 1 (HTT bit, EDX[28], and
+		// logical count, EBX[23:16]) and leaf 4 (physical core count).
+		_, b, _, d := cpuid(1)
+		if (d & (1 << 28)) != 0 {
+			// v will contain logical core count
+			v := (b >> 16) & 255
+			if v > 1 {
+				a4, _, _, _ := cpuid(4)
+				// physical cores
+				v2 := (a4 >> 26) + 1
+				if v2 > 0 {
+					return int(v) / int(v2)
+				}
+			}
+		}
+		return 1
+	}
+	// Leaf 0xb level 0 (SMT): EBX[15:0] = logical processors at this level.
+	_, b, _, _ := cpuidex(0xb, 0)
+	if b&0xffff == 0 {
+		return 1
+	}
+	return int(b & 0xffff)
+}
+
+// logicalCores returns the number of logical (hardware thread) processors,
+// or 0 when it cannot be determined for this vendor.
+func logicalCores() int {
+	mfi := maxFunctionID()
+	switch vendorID() {
+	case Intel:
+		// Use this on old Intel processors
+		if mfi < 0xb {
+			if mfi < 1 {
+				return 0
+			}
+			// CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+			// that can be assigned to logical processors in a physical package.
+			// The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		// Topology leaf 0xb, level 1 (core): EBX[15:0] = logical count.
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case AMD:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+// familyModel returns the CPU family and model numbers from leaf 1 EAX,
+// combining the base fields with the extended family/model fields.
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+// physicalCores returns the number of physical cores, or 0 when unknown.
+// Intel: logical cores divided by threads per core.
+// AMD: extended leaf 0x80000008 ECX[7:0] + 1.
+func physicalCores() int {
+	switch vendorID() {
+	case Intel:
+		return logicalCores() / threadsPerCore()
+	case AMD:
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			return int(c&0xff) + 1
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]Vendor{
+	"AMDisbetter!": AMD,
+	"AuthenticAMD": AMD,
+	"CentaurHauls": VIA,
+	"GenuineIntel": Intel,
+	"TransmetaCPU": Transmeta,
+	"GenuineTMx86": Transmeta,
+	"Geode by NSC": NSC,
+	"VIA VIA VIA ": VIA,
+	"KVMKVMKVMKVM": KVM,
+	"Microsoft Hv": MSVM,
+	"VMwareVMware": VMware,
+	"XenVMMXenVMM": XenHVM,
+}
+
+// vendorID reads the 12-byte vendor string from leaf 0 (register order
+// EBX, EDX, ECX) and maps it to a known Vendor, defaulting to Other.
+func vendorID() Vendor {
+	_, b, c, d := cpuid(0)
+	v := valAsString(b, d, c)
+	vend, ok := vendorMapping[string(v)]
+	if !ok {
+		return Other
+	}
+	return vend
+}
+
+// cacheLine returns the CPU cache line size in bytes, or 0 if it cannot
+// be determined.
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	// Leaf 1 EBX[15:8] is the CLFLUSH line size in 8-byte units;
+	// (ebx&0xff00)>>5 is equivalent to ((ebx>>8)&0xff)*8.
+	cache := (ebx & 0xff00) >> 5 // cflush size
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff // cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+// cacheSize populates c.Cache with L1 data/instruction, L2 and L3 cache
+// sizes in bytes. Levels that cannot be determined are left at -1.
+// Intel: enumerates leaf 4 (deterministic cache parameters).
+// AMD: reads extended leaves 0x80000005/0x80000006. Other vendors: all -1.
+func (c *CPUInfo) cacheSize() {
+	c.Cache.L1D = -1
+	c.Cache.L1I = -1
+	c.Cache.L2 = -1
+	c.Cache.L3 = -1
+	vendor := vendorID()
+	switch vendor {
+	case Intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				// No more caches.
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			// Size in bytes = ways * partitions * line size * sets.
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.Cache.L1D = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.Cache.L1I = size
+				} else {
+					// Unified L1: record it in whichever slot is
+					// still unknown.
+					// Bug fix: the first branch previously assigned
+					// c.Cache.L1I, so L1D was never populated here.
+					if c.Cache.L1D < 0 {
+						c.Cache.L1D = size
+					}
+					if c.Cache.L1I < 0 {
+						c.Cache.L1I = size
+					}
+				}
+			case 2:
+				c.Cache.L2 = size
+			case 3:
+				c.Cache.L3 = size
+			}
+		}
+	case AMD:
+		// Untested.
+		if maxExtendedFunction() < 0x80000005 {
+			return
+		}
+		// ECX[31:24] / EDX[31:24] hold the L1 data/instruction sizes in KB.
+		_, _, ecx, edx := cpuid(0x80000005)
+		c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024)
+		c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024)
+
+		if maxExtendedFunction() < 0x80000006 {
+			return
+		}
+		// ECX[31:16] holds the L2 size in KB.
+		_, _, ecx, _ = cpuid(0x80000006)
+		c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+	}
+}
+
+// SGXSupport describes Intel Software Guard Extensions capabilities
+// as reported by CPUID leaf 0x12.
+type SGXSupport struct {
+	Available           bool  // SGX reported by the feature flags
+	SGX1Supported       bool  // SGX1 instruction set
+	SGX2Supported       bool  // SGX2 instruction set
+	MaxEnclaveSizeNot64 int64 // maximum enclave size outside 64-bit mode, bytes
+	MaxEnclaveSize64    int64 // maximum enclave size in 64-bit mode, bytes
+}
+
+// sgx queries CPUID leaf 0x12 for SGX capabilities. When available is
+// false only the Available field is set (to false) and the rest is zero.
+func sgx(available bool) (rval SGXSupport) {
+	rval.Available = available
+
+	if !available {
+		return
+	}
+
+	// Leaf 0x12 sub-leaf 0: EAX bits 0/1 = SGX1/SGX2; EDX[7:0] and
+	// EDX[15:8] are log2 of the maximum enclave sizes.
+	a, _, _, d := cpuidex(0x12, 0)
+	rval.SGX1Supported = a&0x01 != 0
+	rval.SGX2Supported = a&0x02 != 0
+	rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF)     // pow 2
+	rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2
+
+	return
+}
+
+// support builds the feature flag set for the host CPU by querying the
+// relevant CPUID leaves (1, 7 and the 0x8000000x extended range).
+func support() Flags {
+	mfi := maxFunctionID()
+	vend := vendorID()
+	if mfi < 0x1 {
+		return 0
+	}
+	rval := uint64(0)
+	// Leaf 1: baseline feature bits in ECX (c) and EDX (d).
+	_, _, c, d := cpuid(1)
+	if (d & (1 << 15)) != 0 {
+		rval |= CMOV
+	}
+	if (d & (1 << 23)) != 0 {
+		rval |= MMX
+	}
+	// NOTE(review): EDX bit 25 is the SSE bit; deriving MMXEXT from it
+	// follows the "SSE implies MMX extensions" convention, but confirm
+	// the duplicated bit-25 test below is intentional.
+	if (d & (1 << 25)) != 0 {
+		rval |= MMXEXT
+	}
+	if (d & (1 << 25)) != 0 {
+		rval |= SSE
+	}
+	if (d & (1 << 26)) != 0 {
+		rval |= SSE2
+	}
+	if (c & 1) != 0 {
+		rval |= SSE3
+	}
+	if (c & 0x00000200) != 0 {
+		rval |= SSSE3
+	}
+	if (c & 0x00080000) != 0 {
+		rval |= SSE4
+	}
+	if (c & 0x00100000) != 0 {
+		rval |= SSE42
+	}
+	if (c & (1 << 25)) != 0 {
+		rval |= AESNI
+	}
+	if (c & (1 << 1)) != 0 {
+		rval |= CLMUL
+	}
+	if c&(1<<23) != 0 {
+		rval |= POPCNT
+	}
+	if c&(1<<30) != 0 {
+		rval |= RDRAND
+	}
+	if c&(1<<29) != 0 {
+		rval |= F16C
+	}
+	if c&(1<<13) != 0 {
+		rval |= CX16
+	}
+	// Hyperthreading: Intel only, requires the HTT bit and >1 thread/core.
+	if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 {
+		if threadsPerCore() > 1 {
+			rval |= HTT
+		}
+	}
+
+	// Check XGETBV, OXSAVE and AVX bits
+	if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
+		// Check for OS support
+		eax, _ := xgetbv(0)
+		if (eax & 0x6) == 0x6 {
+			rval |= AVX
+			if (c & 0x00001000) != 0 {
+				rval |= FMA3
+			}
+		}
+	}
+
+	// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
+	if mfi >= 7 {
+		_, ebx, ecx, _ := cpuidex(7, 0)
+		if (rval&AVX) != 0 && (ebx&0x00000020) != 0 {
+			rval |= AVX2
+		}
+		if (ebx & 0x00000008) != 0 {
+			rval |= BMI1
+			if (ebx & 0x00000100) != 0 {
+				rval |= BMI2
+			}
+		}
+		if ebx&(1<<2) != 0 {
+			rval |= SGX
+		}
+		if ebx&(1<<4) != 0 {
+			rval |= HLE
+		}
+		if ebx&(1<<9) != 0 {
+			rval |= ERMS
+		}
+		if ebx&(1<<11) != 0 {
+			rval |= RTM
+		}
+		if ebx&(1<<14) != 0 {
+			rval |= MPX
+		}
+		if ebx&(1<<18) != 0 {
+			rval |= RDSEED
+		}
+		if ebx&(1<<19) != 0 {
+			rval |= ADX
+		}
+		if ebx&(1<<29) != 0 {
+			rval |= SHA
+		}
+
+		// Only detect AVX-512 features if XGETBV is supported
+		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+			// Check for OS support
+			eax, _ := xgetbv(0)
+
+			// Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+			// ZMM16-ZMM31 state are enabled by OS)
+			/// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
+			if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
+				if ebx&(1<<16) != 0 {
+					rval |= AVX512F
+				}
+				if ebx&(1<<17) != 0 {
+					rval |= AVX512DQ
+				}
+				if ebx&(1<<21) != 0 {
+					rval |= AVX512IFMA
+				}
+				if ebx&(1<<26) != 0 {
+					rval |= AVX512PF
+				}
+				if ebx&(1<<27) != 0 {
+					rval |= AVX512ER
+				}
+				if ebx&(1<<28) != 0 {
+					rval |= AVX512CD
+				}
+				if ebx&(1<<30) != 0 {
+					rval |= AVX512BW
+				}
+				if ebx&(1<<31) != 0 {
+					rval |= AVX512VL
+				}
+				// ecx
+				if ecx&(1<<1) != 0 {
+					rval |= AVX512VBMI
+				}
+			}
+		}
+	}
+
+	// Extended leaf 0x80000001: AMD extensions and misc flags.
+	if maxExtendedFunction() >= 0x80000001 {
+		_, _, c, d := cpuid(0x80000001)
+		if (c & (1 << 5)) != 0 {
+			rval |= LZCNT
+			rval |= POPCNT
+		}
+		if (d & (1 << 31)) != 0 {
+			rval |= AMD3DNOW
+		}
+		if (d & (1 << 30)) != 0 {
+			rval |= AMD3DNOWEXT
+		}
+		if (d & (1 << 23)) != 0 {
+			rval |= MMX
+		}
+		if (d & (1 << 22)) != 0 {
+			rval |= MMXEXT
+		}
+		if (c & (1 << 6)) != 0 {
+			rval |= SSE4A
+		}
+		if d&(1<<20) != 0 {
+			rval |= NX
+		}
+		if d&(1<<27) != 0 {
+			rval |= RDTSCP
+		}
+
+		/* Allow for selectively disabling SSE2 functions on AMD processors
+		   with SSE2 support but not SSE4a. This includes Athlon64, some
+		   Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
+		   than SSE2 often enough to utilize this special-case flag.
+		   AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
+		   so that SSE2 is used unless explicitly disabled by checking
+		   AV_CPU_FLAG_SSE2SLOW. */
+		if vendorID() != Intel &&
+			rval&SSE2 != 0 && (c&0x00000040) == 0 {
+			rval |= SSE2SLOW
+		}
+
+		/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+		 * used unless the OS has AVX support. */
+		if (rval & AVX) != 0 {
+			if (c & 0x00000800) != 0 {
+				rval |= XOP
+			}
+			if (c & 0x00010000) != 0 {
+				rval |= FMA4
+			}
+		}
+
+		if vendorID() == Intel {
+			family, model := familyModel()
+			if family == 6 && (model == 9 || model == 13 || model == 14) {
+				/* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
+				 * 6/14 (core1 "yonah") theoretically support sse2, but it's
+				 * usually slower than mmx. */
+				if (rval & SSE2) != 0 {
+					rval |= SSE2SLOW
+				}
+				if (rval & SSE3) != 0 {
+					rval |= SSE3SLOW
+				}
+			}
+			/* The Atom processor has SSSE3 support, which is useful in many cases,
+			 * but sometimes the SSSE3 version is slower than the SSE2 equivalent
+			 * on the Atom, but is generally faster on other processors supporting
+			 * SSSE3. This flag allows for selectively disabling certain SSSE3
+			 * functions on the Atom. */
+			if family == 6 && model == 28 {
+				rval |= ATOM
+			}
+		}
+	}
+	return Flags(rval)
+}
+
+// valAsString decodes CPUID register values into their little-endian
+// ASCII byte representation, truncating at the first NUL byte.
+func valAsString(values ...uint32) []byte {
+	r := make([]byte, 4*len(values))
+	for i, v := range values {
+		dst := r[i*4:]
+		dst[0] = byte(v & 0xff)
+		dst[1] = byte((v >> 8) & 0xff)
+		dst[2] = byte((v >> 16) & 0xff)
+		dst[3] = byte((v >> 24) & 0xff)
+		// Stop at the first NUL terminator, if any.
+		switch {
+		case dst[0] == 0:
+			return r[:i*4]
+		case dst[1] == 0:
+			return r[:i*4+1]
+		case dst[2] == 0:
+			return r[:i*4+2]
+		case dst[3] == 0:
+			return r[:i*4+3]
+		}
+	}
+	return r
+}

+ 40 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_386.s

@@ -0,0 +1,40 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	// Zero ECX first: some CPUID leaves interpret it as a sub-leaf index.
+	XORL CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+4(FP)
+	MOVL BX, ebx+8(FP)
+	MOVL CX, ecx+12(FP)
+	MOVL DX, edx+16(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+4(FP)
+	MOVL DX, edx+8(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET

+ 40 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/cpuid_amd64.s

@@ -0,0 +1,40 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	XORQ CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+8(FP)
+	MOVL DX, edx+12(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET

+ 17 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/detect_intel.go

@@ -0,0 +1,17 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build 386 amd64
+
+package cpuid
+
+// Implemented in cpuid_386.s / cpuid_amd64.s.
+func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+func asmXgetbv(index uint32) (eax, edx uint32)
+func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+
+// initCPU wires the package-level function variables to the real
+// assembly implementations on x86.
+func initCPU() {
+	cpuid = asmCpuid
+	cpuidex = asmCpuidex
+	xgetbv = asmXgetbv
+	rdtscpAsm = asmRdtscpAsm
+}

+ 23 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/detect_ref.go

@@ -0,0 +1,23 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build !amd64,!386
+
+package cpuid
+
+// initCPU installs stub implementations for non-x86 architectures;
+// every query returns zero, i.e. "no features detected".
+func initCPU() {
+	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+
+	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+
+	xgetbv = func(index uint32) (eax, edx uint32) {
+		return 0, 0
+	}
+
+	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+}

+ 3 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/generate.go

@@ -0,0 +1,3 @@
+package cpuid
+
+//go:generate go run private-gen.go

+ 476 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private-gen.go

@@ -0,0 +1,476 @@
+// +build ignore
+
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var inFiles = []string{"cpuid.go", "cpuid_test.go"}
+var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"}
+var fileSet = token.NewFileSet()
+var reWrites = []rewrite{
+	initRewrite("CPUInfo -> cpuInfo"),
+	initRewrite("Vendor -> vendor"),
+	initRewrite("Flags -> flags"),
+	initRewrite("Detect -> detect"),
+	initRewrite("CPU -> cpu"),
+}
+var excludeNames = map[string]bool{"string": true, "join": true, "trim": true,
+	// cpuid_test.go
+	"t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true,
+}
+
+var excludePrefixes = []string{"test", "benchmark"}
+
+// main generates the unexported ("private") variant of the cpuid package:
+// every exported identifier in the input files is rewritten to its
+// lowercase form, the result is written to the private/ directory and the
+// assembly/detection files are copied alongside.
+func main() {
+	Package := "private"
+	parserMode := parser.ParseComments
+	exported := make(map[string]rewrite)
+	for _, file := range inFiles {
+		// ReadFile instead of Open+ReadAll: the file handle was
+		// previously never closed.
+		src, err := ioutil.ReadFile(file)
+		if err != nil {
+			// Fixed: Fatalf calls were missing format verbs (go vet).
+			log.Fatalf("reading input: %s", err)
+		}
+
+		astfile, err := parser.ParseFile(fileSet, file, src, parserMode)
+		if err != nil {
+			log.Fatalf("parsing input: %s", err)
+		}
+
+		// Apply the fixed type/function renames first.
+		for _, rw := range reWrites {
+			astfile = rw(astfile)
+		}
+
+		// Collect every remaining exported identifier and build a
+		// lowercasing rewrite rule for each.
+		var startDecl token.Pos
+		var endDecl token.Pos
+		ast.Inspect(astfile, func(n ast.Node) bool {
+			switch x := n.(type) {
+			case *ast.Ident:
+				if x.IsExported() {
+					t := strings.ToLower(x.Name)
+					for _, pre := range excludePrefixes {
+						if strings.HasPrefix(t, pre) {
+							return true
+						}
+					}
+					if !excludeNames[t] {
+						//if x.Pos() > startDecl && x.Pos() < endDecl {
+						exported[x.Name] = initRewrite(x.Name + " -> " + t)
+					}
+				}
+
+			case *ast.GenDecl:
+				if x.Tok == token.CONST && x.Lparen > 0 {
+					startDecl = x.Lparen
+					endDecl = x.Rparen
+					// fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl))
+				}
+			}
+			return true
+		})
+
+		for _, rw := range exported {
+			astfile = rw(astfile)
+		}
+
+		var buf bytes.Buffer
+
+		if err := printer.Fprint(&buf, fileSet, astfile); err != nil {
+			log.Fatalf("printing output: %s", err)
+		}
+
+		// Remove package documentation and insert information
+		s := buf.String()
+		ind := strings.Index(s, "\npackage cpuid")
+		if ind < 0 {
+			// Guard against slicing with -1 (panic) on malformed output.
+			log.Fatalf("package clause not found in %s", file)
+		}
+		s = s[ind:]
+		s = "// Generated, DO NOT EDIT,\n" +
+			"// but copy it to your own project and rename the package.\n" +
+			"// See more at http://github.com/klauspost/cpuid\n" +
+			s
+
+		outputName := Package + string(os.PathSeparator) + file
+
+		err = ioutil.WriteFile(outputName, []byte(s), 0644)
+		if err != nil {
+			log.Fatalf("writing output: %s", err)
+		}
+		log.Println("Generated", outputName)
+	}
+
+	for _, file := range copyFiles {
+		dst := ""
+		if strings.HasPrefix(file, "cpuid") {
+			dst = Package + string(os.PathSeparator) + file
+		} else {
+			dst = Package + string(os.PathSeparator) + "cpuid_" + file
+		}
+		err := copyFile(file, dst)
+		if err != nil {
+			log.Fatalf("copying file: %s", err)
+		}
+		log.Println("Copied", dst)
+	}
+}
+
+// copyFile copies a file from src to dst. If src and dst files exist, and are
+// the same, then return success. Copy the file contents from src to dst.
+func copyFile(src, dst string) (err error) {
+	sfi, err := os.Stat(src)
+	if err != nil {
+		return
+	}
+	if !sfi.Mode().IsRegular() {
+		// cannot copy non-regular files (e.g., directories,
+		// symlinks, devices, etc.)
+		return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String())
+	}
+	dfi, err := os.Stat(dst)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return
+		}
+	} else {
+		if !(dfi.Mode().IsRegular()) {
+			return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String())
+		}
+		if os.SameFile(sfi, dfi) {
+			// Same underlying file: nothing to do.
+			return
+		}
+	}
+	err = copyFileContents(src, dst)
+	return
+}
+
+// copyFileContents copies the contents of the file named src to the file named
+// by dst. The file will be created if it does not already exist. If the
+// destination file exists, all its contents will be replaced by the contents
+// of the source file.
+func copyFileContents(src, dst string) (err error) {
+	in, err := os.Open(src)
+	if err != nil {
+		return
+	}
+	defer in.Close()
+	out, err := os.Create(dst)
+	if err != nil {
+		return
+	}
+	// Capture the Close error so a failed flush is not silently dropped.
+	defer func() {
+		cerr := out.Close()
+		if err == nil {
+			err = cerr
+		}
+	}()
+	if _, err = io.Copy(out, in); err != nil {
+		return
+	}
+	err = out.Sync()
+	return
+}
+
+// rewrite transforms an AST file according to a single rewrite rule.
+type rewrite func(*ast.File) *ast.File
+
+// Mostly copied from gofmt
+// initRewrite compiles a "pattern -> replacement" rule into a rewrite
+// function; it terminates the process on a malformed rule.
+func initRewrite(rewriteRule string) rewrite {
+	f := strings.Split(rewriteRule, "->")
+	if len(f) != 2 {
+		fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n")
+		os.Exit(2)
+	}
+	pattern := parseExpr(f[0], "pattern")
+	replace := parseExpr(f[1], "replacement")
+	return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
+}
+
+// parseExpr parses s as an expression.
+// It might make sense to expand this to allow statement patterns,
+// but there are problems with preserving formatting and also
+// with what a wildcard for a statement looks like.
+// Terminates the process on a parse failure.
+func parseExpr(s, what string) ast.Expr {
+	x, err := parser.ParseExpr(s)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err)
+		os.Exit(2)
+	}
+	return x
+}
+
+// Keep this function for debugging.
+/*
+func dump(msg string, val reflect.Value) {
+	fmt.Printf("%s:\n", msg)
+	ast.Print(fileSet, val.Interface())
+	fmt.Println()
+}
+*/
+
+// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
+// The file's comment map is rebuilt afterwards so comments stay attached to
+// the rewritten nodes.
+func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
+	cmap := ast.NewCommentMap(fileSet, p, p.Comments)
+	m := make(map[string]reflect.Value)
+	pat := reflect.ValueOf(pattern)
+	repl := reflect.ValueOf(replace)
+
+	var rewriteVal func(val reflect.Value) reflect.Value
+	rewriteVal = func(val reflect.Value) reflect.Value {
+		// don't bother if val is invalid to start with
+		if !val.IsValid() {
+			return reflect.Value{}
+		}
+		// Reset wildcard bindings before each match attempt.
+		for k := range m {
+			delete(m, k)
+		}
+		val = apply(rewriteVal, val)
+		if match(m, pat, val) {
+			val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos()))
+		}
+		return val
+	}
+
+	r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File)
+	r.Comments = cmap.Filter(r).Comments() // recreate comments list
+	return r
+}
+
+// set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y.
+func set(x, y reflect.Value) {
+	// don't bother if x cannot be set or y is invalid
+	if !x.CanSet() || !y.IsValid() {
+		return
+	}
+	// Swallow only the reflect panics caused by incompatible assignment;
+	// anything else is re-raised.
+	defer func() {
+		if x := recover(); x != nil {
+			if s, ok := x.(string); ok &&
+				(strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) {
+				// x cannot be set to y - ignore this rewrite
+				return
+			}
+			panic(x)
+		}
+	}()
+	x.Set(y)
+}
+
+// Values/types for special cases.
+var (
+	objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
+	scopePtrNil  = reflect.ValueOf((*ast.Scope)(nil))
+
+	identType     = reflect.TypeOf((*ast.Ident)(nil))
+	objectPtrType = reflect.TypeOf((*ast.Object)(nil))
+	positionType  = reflect.TypeOf(token.NoPos)
+	callExprType  = reflect.TypeOf((*ast.CallExpr)(nil))
+	scopePtrType  = reflect.TypeOf((*ast.Scope)(nil))
+)
+
+// apply replaces each AST field x in val with f(x), returning val.
+// To avoid extra conversions, f operates on the reflect.Value form.
+func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
+	if !val.IsValid() {
+		return reflect.Value{}
+	}
+
+	// *ast.Objects introduce cycles and are likely incorrect after
+	// rewrite; don't follow them but replace with nil instead
+	if val.Type() == objectPtrType {
+		return objectPtrNil
+	}
+
+	// similarly for scopes: they are likely incorrect after a rewrite;
+	// replace them with nil
+	if val.Type() == scopePtrType {
+		return scopePtrNil
+	}
+
+	// Recurse into slices, struct fields and interface elements.
+	switch v := reflect.Indirect(val); v.Kind() {
+	case reflect.Slice:
+		for i := 0; i < v.Len(); i++ {
+			e := v.Index(i)
+			set(e, f(e))
+		}
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			e := v.Field(i)
+			set(e, f(e))
+		}
+	case reflect.Interface:
+		e := v.Elem()
+		set(v, f(e))
+	}
+	return val
+}
+
+// isWildcard reports whether s is a single lowercase rune, i.e. a
+// wildcard identifier in a gofmt-style rewrite pattern.
+func isWildcard(s string) bool {
+	rune, size := utf8.DecodeRuneInString(s)
+	return size == len(s) && unicode.IsLower(rune)
+}
+
+// match returns true if pattern matches val,
+// recording wildcard submatches in m.
+// If m == nil, match checks whether pattern == val.
+func match(m map[string]reflect.Value, pattern, val reflect.Value) bool {
+	// Wildcard matches any expression.  If it appears multiple
+	// times in the pattern, it must match the same expression
+	// each time.
+	if m != nil && pattern.IsValid() && pattern.Type() == identType {
+		name := pattern.Interface().(*ast.Ident).Name
+		if isWildcard(name) && val.IsValid() {
+			// wildcards only match valid (non-nil) expressions.
+			if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() {
+				if old, ok := m[name]; ok {
+					// Repeated wildcard: must equal its first binding.
+					return match(nil, old, val)
+				}
+				m[name] = val
+				return true
+			}
+		}
+	}
+
+	// Otherwise, pattern and val must match recursively.
+	if !pattern.IsValid() || !val.IsValid() {
+		return !pattern.IsValid() && !val.IsValid()
+	}
+	if pattern.Type() != val.Type() {
+		return false
+	}
+
+	// Special cases.
+	switch pattern.Type() {
+	case identType:
+		// For identifiers, only the names need to match
+		// (and none of the other *ast.Object information).
+		// This is a common case, handle it all here instead
+		// of recursing down any further via reflection.
+		p := pattern.Interface().(*ast.Ident)
+		v := val.Interface().(*ast.Ident)
+		return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name
+	case objectPtrType, positionType:
+		// object pointers and token positions always match
+		return true
+	case callExprType:
+		// For calls, the Ellipsis fields (token.Position) must
+		// match since that is how f(x) and f(x...) are different.
+		// Check them here but fall through for the remaining fields.
+		p := pattern.Interface().(*ast.CallExpr)
+		v := val.Interface().(*ast.CallExpr)
+		if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() {
+			return false
+		}
+	}
+
+	p := reflect.Indirect(pattern)
+	v := reflect.Indirect(val)
+	if !p.IsValid() || !v.IsValid() {
+		return !p.IsValid() && !v.IsValid()
+	}
+
+	// Generic structural comparison via reflection.
+	switch p.Kind() {
+	case reflect.Slice:
+		if p.Len() != v.Len() {
+			return false
+		}
+		for i := 0; i < p.Len(); i++ {
+			if !match(m, p.Index(i), v.Index(i)) {
+				return false
+			}
+		}
+		return true
+
+	case reflect.Struct:
+		for i := 0; i < p.NumField(); i++ {
+			if !match(m, p.Field(i), v.Field(i)) {
+				return false
+			}
+		}
+		return true
+
+	case reflect.Interface:
+		return match(m, p.Elem(), v.Elem())
+	}
+
+	// Handle token integers, etc.
+	return p.Interface() == v.Interface()
+}
+
+// subst returns a copy of pattern with values from m substituted in place
+// of wildcards and pos used as the position of tokens from the pattern.
+// if m == nil, subst returns a copy of pattern and doesn't change the line
+// number information.
+func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value {
+	if !pattern.IsValid() {
+		return reflect.Value{}
+	}
+
+	// Wildcard gets replaced with map value.
+	if m != nil && pattern.Type() == identType {
+		name := pattern.Interface().(*ast.Ident).Name
+		if isWildcard(name) {
+			if old, ok := m[name]; ok {
+				return subst(nil, old, reflect.Value{})
+			}
+		}
+	}
+
+	if pos.IsValid() && pattern.Type() == positionType {
+		// use new position only if old position was valid in the first place
+		if old := pattern.Interface().(token.Pos); !old.IsValid() {
+			return pattern
+		}
+		return pos
+	}
+
+	// Otherwise copy.
+	switch p := pattern; p.Kind() {
+	case reflect.Slice:
+		v := reflect.MakeSlice(p.Type(), p.Len(), p.Len())
+		for i := 0; i < p.Len(); i++ {
+			v.Index(i).Set(subst(m, p.Index(i), pos))
+		}
+		return v
+
+	case reflect.Struct:
+		v := reflect.New(p.Type()).Elem()
+		for i := 0; i < p.NumField(); i++ {
+			v.Field(i).Set(subst(m, p.Field(i), pos))
+		}
+		return v
+
+	case reflect.Ptr:
+		// Rebuild pointers so positions can be rewritten in the copy.
+		v := reflect.New(p.Type()).Elem()
+		if elem := p.Elem(); elem.IsValid() {
+			v.Set(subst(m, elem, pos).Addr())
+		}
+		return v
+
+	case reflect.Interface:
+		v := reflect.New(p.Type()).Elem()
+		if elem := p.Elem(); elem.IsValid() {
+			v.Set(subst(m, elem, pos))
+		}
+		return v
+	}
+
+	return pattern
+}

+ 6 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/README.md

@@ -0,0 +1,6 @@
+# cpuid private
+
+This is a specially converted version of the cpuid package, so it can be
+included in a package without exporting anything.
+
+Package home: https://github.com/klauspost/cpuid

+ 987 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid.go

@@ -0,0 +1,987 @@
+// Generated, DO NOT EDIT,
+// but copy it to your own project and rename the package.
+// See more at http://github.com/klauspost/cpuid
+
+package cpuid
+
+import (
+	"strings"
+)
+
+// Vendor is a representation of a CPU vendor.
+type vendor int
+
+const (
+	other	vendor	= iota
+	intel
+	amd
+	via
+	transmeta
+	nsc
+	kvm	// Kernel-based Virtual Machine
+	msvm	// Microsoft Hyper-V or Windows Virtual PC
+	vmware
+	xenhvm
+)
+
+const (
+	cmov		= 1 << iota	// i686 CMOV
+	nx				// NX (No-Execute) bit
+	amd3dnow			// AMD 3DNOW
+	amd3dnowext			// AMD 3DNowExt
+	mmx				// standard MMX
+	mmxext				// SSE integer functions or AMD MMX ext
+	sse				// SSE functions
+	sse2				// P4 SSE functions
+	sse3				// Prescott SSE3 functions
+	ssse3				// Conroe SSSE3 functions
+	sse4				// Penryn SSE4.1 functions
+	sse4a				// AMD Barcelona microarchitecture SSE4a instructions
+	sse42				// Nehalem SSE4.2 functions
+	avx				// AVX functions
+	avx2				// AVX2 functions
+	fma3				// Intel FMA 3
+	fma4				// Bulldozer FMA4 functions
+	xop				// Bulldozer XOP functions
+	f16c				// Half-precision floating-point conversion
+	bmi1				// Bit Manipulation Instruction Set 1
+	bmi2				// Bit Manipulation Instruction Set 2
+	tbm				// AMD Trailing Bit Manipulation
+	lzcnt				// LZCNT instruction
+	popcnt				// POPCNT instruction
+	aesni				// Advanced Encryption Standard New Instructions
+	clmul				// Carry-less Multiplication
+	htt				// Hyperthreading (enabled)
+	hle				// Hardware Lock Elision
+	rtm				// Restricted Transactional Memory
+	rdrand				// RDRAND instruction is available
+	rdseed				// RDSEED instruction is available
+	adx				// Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	sha				// Intel SHA Extensions
+	avx512f				// AVX-512 Foundation
+	avx512dq			// AVX-512 Doubleword and Quadword Instructions
+	avx512ifma			// AVX-512 Integer Fused Multiply-Add Instructions
+	avx512pf			// AVX-512 Prefetch Instructions
+	avx512er			// AVX-512 Exponential and Reciprocal Instructions
+	avx512cd			// AVX-512 Conflict Detection Instructions
+	avx512bw			// AVX-512 Byte and Word Instructions
+	avx512vl			// AVX-512 Vector Length Extensions
+	avx512vbmi			// AVX-512 Vector Bit Manipulation Instructions
+	mpx				// Intel MPX (Memory Protection Extensions)
+	erms				// Enhanced REP MOVSB/STOSB
+	rdtscp				// RDTSCP Instruction
+	cx16				// CMPXCHG16B Instruction
+
+	// Performance indicators
+	sse2slow	// SSE2 is supported, but usually not faster
+	sse3slow	// SSE3 is supported, but usually not faster
+	atom		// Atom processor, some SSSE3 instructions are slower
+)
+
+var flagNames = map[flags]string{
+	cmov:		"CMOV",		// i686 CMOV
+	nx:		"NX",		// NX (No-Execute) bit
+	amd3dnow:	"AMD3DNOW",	// AMD 3DNOW
+	amd3dnowext:	"AMD3DNOWEXT",	// AMD 3DNowExt
+	mmx:		"MMX",		// Standard MMX
+	mmxext:		"MMXEXT",	// SSE integer functions or AMD MMX ext
+	sse:		"SSE",		// SSE functions
+	sse2:		"SSE2",		// P4 SSE2 functions
+	sse3:		"SSE3",		// Prescott SSE3 functions
+	ssse3:		"SSSE3",	// Conroe SSSE3 functions
+	sse4:		"SSE4.1",	// Penryn SSE4.1 functions
+	sse4a:		"SSE4A",	// AMD Barcelona microarchitecture SSE4a instructions
+	sse42:		"SSE4.2",	// Nehalem SSE4.2 functions
+	avx:		"AVX",		// AVX functions
+	avx2:		"AVX2",		// AVX functions
+	fma3:		"FMA3",		// Intel FMA 3
+	fma4:		"FMA4",		// Bulldozer FMA4 functions
+	xop:		"XOP",		// Bulldozer XOP functions
+	f16c:		"F16C",		// Half-precision floating-point conversion
+	bmi1:		"BMI1",		// Bit Manipulation Instruction Set 1
+	bmi2:		"BMI2",		// Bit Manipulation Instruction Set 2
+	tbm:		"TBM",		// AMD Trailing Bit Manipulation
+	lzcnt:		"LZCNT",	// LZCNT instruction
+	popcnt:		"POPCNT",	// POPCNT instruction
+	aesni:		"AESNI",	// Advanced Encryption Standard New Instructions
+	clmul:		"CLMUL",	// Carry-less Multiplication
+	htt:		"HTT",		// Hyperthreading (enabled)
+	hle:		"HLE",		// Hardware Lock Elision
+	rtm:		"RTM",		// Restricted Transactional Memory
+	rdrand:		"RDRAND",	// RDRAND instruction is available
+	rdseed:		"RDSEED",	// RDSEED instruction is available
+	adx:		"ADX",		// Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+	sha:		"SHA",		// Intel SHA Extensions
+	avx512f:	"AVX512F",	// AVX-512 Foundation
+	avx512dq:	"AVX512DQ",	// AVX-512 Doubleword and Quadword Instructions
+	avx512ifma:	"AVX512IFMA",	// AVX-512 Integer Fused Multiply-Add Instructions
+	avx512pf:	"AVX512PF",	// AVX-512 Prefetch Instructions
+	avx512er:	"AVX512ER",	// AVX-512 Exponential and Reciprocal Instructions
+	avx512cd:	"AVX512CD",	// AVX-512 Conflict Detection Instructions
+	avx512bw:	"AVX512BW",	// AVX-512 Byte and Word Instructions
+	avx512vl:	"AVX512VL",	// AVX-512 Vector Length Extensions
+	avx512vbmi:	"AVX512VBMI",	// AVX-512 Vector Bit Manipulation Instructions
+	mpx:		"MPX",		// Intel MPX (Memory Protection Extensions)
+	erms:		"ERMS",		// Enhanced REP MOVSB/STOSB
+	rdtscp:		"RDTSCP",	// RDTSCP Instruction
+	cx16:		"CX16",		// CMPXCHG16B Instruction
+
+	// Performance indicators
+	sse2slow:	"SSE2SLOW",	// SSE2 supported, but usually not faster
+	sse3slow:	"SSE3SLOW",	// SSE3 supported, but usually not faster
+	atom:		"ATOM",		// Atom processor, some SSSE3 instructions are slower
+
+}
+
+// CPUInfo contains information about the detected system CPU.
+type cpuInfo struct {
+	brandname	string	// Brand name reported by the CPU
+	vendorid	vendor	// Comparable CPU vendor ID
+	features	flags	// Features of the CPU
+	physicalcores	int	// Number of physical processor cores in your CPU. Will be 0 if undetectable.
+	threadspercore	int	// Number of threads per physical core. Will be 1 if undetectable.
+	logicalcores	int	// Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
+	family		int	// CPU family number
+	model		int	// CPU model number
+	cacheline	int	// Cache line size in bytes. Will be 0 if undetectable.
+	cache		struct {
+		l1i	int	// L1 Instruction Cache (per core or shared). Will be -1 if undetected
+		l1d	int	// L1 Data Cache (per core or shared). Will be -1 if undetected
+		l2	int	// L2 Cache (per core or shared). Will be -1 if undetected
+		l3	int	// L3 Instruction Cache (per core or shared). Will be -1 if undetected
+	}
+	maxFunc		uint32
+	maxExFunc	uint32
+}
+
+var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
+var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+var xgetbv func(index uint32) (eax, edx uint32)
+var rdtscpAsm func() (eax, ebx, ecx, edx uint32)
+
+// CPU contains information about the CPU as detected on startup,
+// or when Detect last was called.
+//
+// Use this as the primary entry point to your data,
+// this way queries are kept consistent.
+var cpu cpuInfo
+
+func init() {
+	initCPU()
+	detect()
+}
+
+// Detect will re-detect current CPU info.
+// This will replace the content of the exported CPU variable.
+//
+// Unless you expect the CPU to change while you are running your program
+// you should not need to call this function.
+// If you call this, you must ensure that no other goroutine is accessing the
+// exported CPU variable.
+func detect() {
+	cpu.maxFunc = maxFunctionID()
+	cpu.maxExFunc = maxExtendedFunction()
+	cpu.brandname = brandName()
+	cpu.cacheline = cacheLine()
+	cpu.family, cpu.model = familyModel()
+	cpu.features = support()
+	cpu.threadspercore = threadsPerCore()
+	cpu.logicalcores = logicalCores()
+	cpu.physicalcores = physicalCores()
+	cpu.vendorid = vendorID()
+	cpu.cacheSize()
+}
+
+// Generated here: http://play.golang.org/p/BxFH2Gdc0G
+
+// Cmov indicates support of CMOV instructions
+func (c cpuInfo) cmov() bool {
+	return c.features&cmov != 0
+}
+
+// Amd3dnow indicates support of AMD 3DNOW! instructions
+func (c cpuInfo) amd3dnow() bool {
+	return c.features&amd3dnow != 0
+}
+
+// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
+func (c cpuInfo) amd3dnowext() bool {
+	return c.features&amd3dnowext != 0
+}
+
+// MMX indicates support of MMX instructions
+func (c cpuInfo) mmx() bool {
+	return c.features&mmx != 0
+}
+
+// MMXExt indicates support of MMXEXT instructions
+// (SSE integer functions or AMD MMX ext)
+func (c cpuInfo) mmxext() bool {
+	return c.features&mmxext != 0
+}
+
+// SSE indicates support of SSE instructions
+func (c cpuInfo) sse() bool {
+	return c.features&sse != 0
+}
+
+// SSE2 indicates support of SSE 2 instructions
+func (c cpuInfo) sse2() bool {
+	return c.features&sse2 != 0
+}
+
+// SSE3 indicates support of SSE 3 instructions
+func (c cpuInfo) sse3() bool {
+	return c.features&sse3 != 0
+}
+
+// SSSE3 indicates support of SSSE 3 instructions
+func (c cpuInfo) ssse3() bool {
+	return c.features&ssse3 != 0
+}
+
+// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
+func (c cpuInfo) sse4() bool {
+	return c.features&sse4 != 0
+}
+
+// SSE42 indicates support of SSE4.2 instructions
+func (c cpuInfo) sse42() bool {
+	return c.features&sse42 != 0
+}
+
+// AVX indicates support of AVX instructions
+// and operating system support of AVX instructions
+func (c cpuInfo) avx() bool {
+	return c.features&avx != 0
+}
+
+// AVX2 indicates support of AVX2 instructions
+func (c cpuInfo) avx2() bool {
+	return c.features&avx2 != 0
+}
+
+// FMA3 indicates support of FMA3 instructions
+func (c cpuInfo) fma3() bool {
+	return c.features&fma3 != 0
+}
+
+// FMA4 indicates support of FMA4 instructions
+func (c cpuInfo) fma4() bool {
+	return c.features&fma4 != 0
+}
+
+// XOP indicates support of XOP instructions
+func (c cpuInfo) xop() bool {
+	return c.features&xop != 0
+}
+
+// F16C indicates support of F16C instructions
+func (c cpuInfo) f16c() bool {
+	return c.features&f16c != 0
+}
+
+// BMI1 indicates support of BMI1 instructions
+func (c cpuInfo) bmi1() bool {
+	return c.features&bmi1 != 0
+}
+
+// BMI2 indicates support of BMI2 instructions
+func (c cpuInfo) bmi2() bool {
+	return c.features&bmi2 != 0
+}
+
+// TBM indicates support of TBM instructions
+// (AMD Trailing Bit Manipulation)
+func (c cpuInfo) tbm() bool {
+	return c.features&tbm != 0
+}
+
+// Lzcnt indicates support of LZCNT instruction
+func (c cpuInfo) lzcnt() bool {
+	return c.features&lzcnt != 0
+}
+
+// Popcnt indicates support of POPCNT instruction
+func (c cpuInfo) popcnt() bool {
+	return c.features&popcnt != 0
+}
+
+// HTT indicates the processor has Hyperthreading enabled
+func (c cpuInfo) htt() bool {
+	return c.features&htt != 0
+}
+
+// SSE2Slow indicates that SSE2 may be slow on this processor
+func (c cpuInfo) sse2slow() bool {
+	return c.features&sse2slow != 0
+}
+
+// SSE3Slow indicates that SSE3 may be slow on this processor
+func (c cpuInfo) sse3slow() bool {
+	return c.features&sse3slow != 0
+}
+
+// AesNi indicates support of AES-NI instructions
+// (Advanced Encryption Standard New Instructions)
+func (c cpuInfo) aesni() bool {
+	return c.features&aesni != 0
+}
+
+// Clmul indicates support of CLMUL instructions
+// (Carry-less Multiplication)
+func (c cpuInfo) clmul() bool {
+	return c.features&clmul != 0
+}
+
+// NX indicates support of NX (No-Execute) bit
+func (c cpuInfo) nx() bool {
+	return c.features&nx != 0
+}
+
+// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
+func (c cpuInfo) sse4a() bool {
+	return c.features&sse4a != 0
+}
+
+// HLE indicates support of Hardware Lock Elision
+func (c cpuInfo) hle() bool {
+	return c.features&hle != 0
+}
+
+// RTM indicates support of Restricted Transactional Memory
+func (c cpuInfo) rtm() bool {
+	return c.features&rtm != 0
+}
+
+// Rdrand indicates support of RDRAND instruction is available
+func (c cpuInfo) rdrand() bool {
+	return c.features&rdrand != 0
+}
+
+// Rdseed indicates support of RDSEED instruction is available
+func (c cpuInfo) rdseed() bool {
+	return c.features&rdseed != 0
+}
+
+// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
+func (c cpuInfo) adx() bool {
+	return c.features&adx != 0
+}
+
+// SHA indicates support of Intel SHA Extensions
+func (c cpuInfo) sha() bool {
+	return c.features&sha != 0
+}
+
+// AVX512F indicates support of AVX-512 Foundation
+func (c cpuInfo) avx512f() bool {
+	return c.features&avx512f != 0
+}
+
+// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
+func (c cpuInfo) avx512dq() bool {
+	return c.features&avx512dq != 0
+}
+
+// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
+func (c cpuInfo) avx512ifma() bool {
+	return c.features&avx512ifma != 0
+}
+
+// AVX512PF indicates support of AVX-512 Prefetch Instructions
+func (c cpuInfo) avx512pf() bool {
+	return c.features&avx512pf != 0
+}
+
+// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
+func (c cpuInfo) avx512er() bool {
+	return c.features&avx512er != 0
+}
+
+// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
+func (c cpuInfo) avx512cd() bool {
+	return c.features&avx512cd != 0
+}
+
+// AVX512BW indicates support of AVX-512 Byte and Word Instructions
+func (c cpuInfo) avx512bw() bool {
+	return c.features&avx512bw != 0
+}
+
+// AVX512VL indicates support of AVX-512 Vector Length Extensions
+func (c cpuInfo) avx512vl() bool {
+	return c.features&avx512vl != 0
+}
+
+// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
+func (c cpuInfo) avx512vbmi() bool {
+	return c.features&avx512vbmi != 0
+}
+
+// MPX indicates support of Intel MPX (Memory Protection Extensions)
+func (c cpuInfo) mpx() bool {
+	return c.features&mpx != 0
+}
+
+// ERMS indicates support of Enhanced REP MOVSB/STOSB
+func (c cpuInfo) erms() bool {
+	return c.features&erms != 0
+}
+
+func (c cpuInfo) rdtscp() bool {
+	return c.features&rdtscp != 0
+}
+
+func (c cpuInfo) cx16() bool {
+	return c.features&cx16 != 0
+}
+
+// Atom indicates an Atom processor
+func (c cpuInfo) atom() bool {
+	return c.features&atom != 0
+}
+
+// Intel returns true if vendor is recognized as Intel
+func (c cpuInfo) intel() bool {
+	return c.vendorid == intel
+}
+
+// AMD returns true if vendor is recognized as AMD
+func (c cpuInfo) amd() bool {
+	return c.vendorid == amd
+}
+
+// Transmeta returns true if vendor is recognized as Transmeta
+func (c cpuInfo) transmeta() bool {
+	return c.vendorid == transmeta
+}
+
+// NSC returns true if vendor is recognized as National Semiconductor
+func (c cpuInfo) nsc() bool {
+	return c.vendorid == nsc
+}
+
+// VIA returns true if vendor is recognized as VIA
+func (c cpuInfo) via() bool {
+	return c.vendorid == via
+}
+
+// RTCounter returns the 64-bit time-stamp counter
+// Uses the RDTSCP instruction. The value 0 is returned
+// if the CPU does not support the instruction.
+func (c cpuInfo) rtcounter() uint64 {
+	if !c.rdtscp() {
+		return 0
+	}
+	a, _, _, d := rdtscpAsm()
+	return uint64(a) | (uint64(d) << 32)
+}
+
+// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
+// This variable is OS dependent, but on Linux contains information
+// about the current cpu/core the code is running on.
+// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
+func (c cpuInfo) ia32tscaux() uint32 {
+	if !c.rdtscp() {
+		return 0
+	}
+	_, _, ecx, _ := rdtscpAsm()
+	return ecx
+}
+
+// LogicalCPU will return the Logical CPU the code is currently executing on.
+// This is likely to change when the OS re-schedules the running thread
+// to another CPU.
+// If the current core cannot be detected, -1 will be returned.
+func (c cpuInfo) logicalcpu() int {
+	if c.maxFunc < 1 {
+		return -1
+	}
+	_, ebx, _, _ := cpuid(1)
+	return int(ebx >> 24)
+}
+
+// VM Will return true if the cpu id indicates we are in
+// a virtual machine. This is only a hint, and will very likely
+// have many false negatives.
+func (c cpuInfo) vm() bool {
+	switch c.vendorid {
+	case msvm, kvm, vmware, xenhvm:
+		return true
+	}
+	return false
+}
+
+// Flags contains detected cpu features and caracteristics
+type flags uint64
+
+// String returns a string representation of the detected
+// CPU features.
+func (f flags) String() string {
+	return strings.Join(f.strings(), ",")
+}
+
+// Strings returns an array of the detected features.
+func (f flags) strings() []string {
+	s := support()
+	r := make([]string, 0, 20)
+	for i := uint(0); i < 64; i++ {
+		key := flags(1 << i)
+		val := flagNames[key]
+		if s&key != 0 {
+			r = append(r, val)
+		}
+	}
+	return r
+}
+
+func maxExtendedFunction() uint32 {
+	eax, _, _, _ := cpuid(0x80000000)
+	return eax
+}
+
+func maxFunctionID() uint32 {
+	a, _, _, _ := cpuid(0)
+	return a
+}
+
+func brandName() string {
+	if maxExtendedFunction() >= 0x80000004 {
+		v := make([]uint32, 0, 48)
+		for i := uint32(0); i < 3; i++ {
+			a, b, c, d := cpuid(0x80000002 + i)
+			v = append(v, a, b, c, d)
+		}
+		return strings.Trim(string(valAsString(v...)), " ")
+	}
+	return "unknown"
+}
+
+func threadsPerCore() int {
+	mfi := maxFunctionID()
+	if mfi < 0x4 || vendorID() != intel {
+		return 1
+	}
+
+	if mfi < 0xb {
+		_, b, _, d := cpuid(1)
+		if (d & (1 << 28)) != 0 {
+			// v will contain logical core count
+			v := (b >> 16) & 255
+			if v > 1 {
+				a4, _, _, _ := cpuid(4)
+				// physical cores
+				v2 := (a4 >> 26) + 1
+				if v2 > 0 {
+					return int(v) / int(v2)
+				}
+			}
+		}
+		return 1
+	}
+	_, b, _, _ := cpuidex(0xb, 0)
+	if b&0xffff == 0 {
+		return 1
+	}
+	return int(b & 0xffff)
+}
+
+func logicalCores() int {
+	mfi := maxFunctionID()
+	switch vendorID() {
+	case intel:
+		// Use this on old Intel processors
+		if mfi < 0xb {
+			if mfi < 1 {
+				return 0
+			}
+			// CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID)
+			// that can be assigned to logical processors in a physical package.
+			// The value may not be the same as the number of logical processors that are present in the hardware of a physical package.
+			_, ebx, _, _ := cpuid(1)
+			logical := (ebx >> 16) & 0xff
+			return int(logical)
+		}
+		_, b, _, _ := cpuidex(0xb, 1)
+		return int(b & 0xffff)
+	case amd:
+		_, b, _, _ := cpuid(1)
+		return int((b >> 16) & 0xff)
+	default:
+		return 0
+	}
+}
+
+func familyModel() (int, int) {
+	if maxFunctionID() < 0x1 {
+		return 0, 0
+	}
+	eax, _, _, _ := cpuid(1)
+	family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)
+	model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0)
+	return int(family), int(model)
+}
+
+func physicalCores() int {
+	switch vendorID() {
+	case intel:
+		return logicalCores() / threadsPerCore()
+	case amd:
+		if maxExtendedFunction() >= 0x80000008 {
+			_, _, c, _ := cpuid(0x80000008)
+			return int(c&0xff) + 1
+		}
+	}
+	return 0
+}
+
+// Excerpt from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
+var vendorMapping = map[string]vendor{
+	"AMDisbetter!":	amd,
+	"AuthenticAMD":	amd,
+	"CentaurHauls":	via,
+	"GenuineIntel":	intel,
+	"TransmetaCPU":	transmeta,
+	"GenuineTMx86":	transmeta,
+	"Geode by NSC":	nsc,
+	"VIA VIA VIA ":	via,
+	"KVMKVMKVMKVM":	kvm,
+	"Microsoft Hv":	msvm,
+	"VMwareVMware":	vmware,
+	"XenVMMXenVMM":	xenhvm,
+}
+
+func vendorID() vendor {
+	_, b, c, d := cpuid(0)
+	v := valAsString(b, d, c)
+	vend, ok := vendorMapping[string(v)]
+	if !ok {
+		return other
+	}
+	return vend
+}
+
+func cacheLine() int {
+	if maxFunctionID() < 0x1 {
+		return 0
+	}
+
+	_, ebx, _, _ := cpuid(1)
+	cache := (ebx & 0xff00) >> 5	// cflush size
+	if cache == 0 && maxExtendedFunction() >= 0x80000006 {
+		_, _, ecx, _ := cpuid(0x80000006)
+		cache = ecx & 0xff	// cacheline size
+	}
+	// TODO: Read from Cache and TLB Information
+	return int(cache)
+}
+
+func (c *cpuInfo) cacheSize() {
+	c.cache.l1d = -1
+	c.cache.l1i = -1
+	c.cache.l2 = -1
+	c.cache.l3 = -1
+	vendor := vendorID()
+	switch vendor {
+	case intel:
+		if maxFunctionID() < 4 {
+			return
+		}
+		for i := uint32(0); ; i++ {
+			eax, ebx, ecx, _ := cpuidex(4, i)
+			cacheType := eax & 15
+			if cacheType == 0 {
+				break
+			}
+			cacheLevel := (eax >> 5) & 7
+			coherency := int(ebx&0xfff) + 1
+			partitions := int((ebx>>12)&0x3ff) + 1
+			associativity := int((ebx>>22)&0x3ff) + 1
+			sets := int(ecx) + 1
+			size := associativity * partitions * coherency * sets
+			switch cacheLevel {
+			case 1:
+				if cacheType == 1 {
+					// 1 = Data Cache
+					c.cache.l1d = size
+				} else if cacheType == 2 {
+					// 2 = Instruction Cache
+					c.cache.l1i = size
+				} else {
+					if c.cache.l1d < 0 {
+						c.cache.l1i = size
+					}
+					if c.cache.l1i < 0 {
+						c.cache.l1i = size
+					}
+				}
+			case 2:
+				c.cache.l2 = size
+			case 3:
+				c.cache.l3 = size
+			}
+		}
+	case amd:
+		// Untested.
+		if maxExtendedFunction() < 0x80000005 {
+			return
+		}
+		_, _, ecx, edx := cpuid(0x80000005)
+		c.cache.l1d = int(((ecx >> 24) & 0xFF) * 1024)
+		c.cache.l1i = int(((edx >> 24) & 0xFF) * 1024)
+
+		if maxExtendedFunction() < 0x80000006 {
+			return
+		}
+		_, _, ecx, _ = cpuid(0x80000006)
+		c.cache.l2 = int(((ecx >> 16) & 0xFFFF) * 1024)
+	}
+
+	return
+}
+
+func support() flags {
+	mfi := maxFunctionID()
+	vend := vendorID()
+	if mfi < 0x1 {
+		return 0
+	}
+	rval := uint64(0)
+	_, _, c, d := cpuid(1)
+	if (d & (1 << 15)) != 0 {
+		rval |= cmov
+	}
+	if (d & (1 << 23)) != 0 {
+		rval |= mmx
+	}
+	if (d & (1 << 25)) != 0 {
+		rval |= mmxext
+	}
+	if (d & (1 << 25)) != 0 {
+		rval |= sse
+	}
+	if (d & (1 << 26)) != 0 {
+		rval |= sse2
+	}
+	if (c & 1) != 0 {
+		rval |= sse3
+	}
+	if (c & 0x00000200) != 0 {
+		rval |= ssse3
+	}
+	if (c & 0x00080000) != 0 {
+		rval |= sse4
+	}
+	if (c & 0x00100000) != 0 {
+		rval |= sse42
+	}
+	if (c & (1 << 25)) != 0 {
+		rval |= aesni
+	}
+	if (c & (1 << 1)) != 0 {
+		rval |= clmul
+	}
+	if c&(1<<23) != 0 {
+		rval |= popcnt
+	}
+	if c&(1<<30) != 0 {
+		rval |= rdrand
+	}
+	if c&(1<<29) != 0 {
+		rval |= f16c
+	}
+	if c&(1<<13) != 0 {
+		rval |= cx16
+	}
+	if vend == intel && (d&(1<<28)) != 0 && mfi >= 4 {
+		if threadsPerCore() > 1 {
+			rval |= htt
+		}
+	}
+
+	// Check XGETBV, OXSAVE and AVX bits
+	if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 {
+		// Check for OS support
+		eax, _ := xgetbv(0)
+		if (eax & 0x6) == 0x6 {
+			rval |= avx
+			if (c & 0x00001000) != 0 {
+				rval |= fma3
+			}
+		}
+	}
+
+	// Check AVX2, AVX2 requires OS support, but BMI1/2 don't.
+	if mfi >= 7 {
+		_, ebx, ecx, _ := cpuidex(7, 0)
+		if (rval&avx) != 0 && (ebx&0x00000020) != 0 {
+			rval |= avx2
+		}
+		if (ebx & 0x00000008) != 0 {
+			rval |= bmi1
+			if (ebx & 0x00000100) != 0 {
+				rval |= bmi2
+			}
+		}
+		if ebx&(1<<4) != 0 {
+			rval |= hle
+		}
+		if ebx&(1<<9) != 0 {
+			rval |= erms
+		}
+		if ebx&(1<<11) != 0 {
+			rval |= rtm
+		}
+		if ebx&(1<<14) != 0 {
+			rval |= mpx
+		}
+		if ebx&(1<<18) != 0 {
+			rval |= rdseed
+		}
+		if ebx&(1<<19) != 0 {
+			rval |= adx
+		}
+		if ebx&(1<<29) != 0 {
+			rval |= sha
+		}
+
+		// Only detect AVX-512 features if XGETBV is supported
+		if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) {
+			// Check for OS support
+			eax, _ := xgetbv(0)
+
+			// Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and
+			// ZMM16-ZMM31 state are enabled by OS)
+			/// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS).
+			if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 {
+				if ebx&(1<<16) != 0 {
+					rval |= avx512f
+				}
+				if ebx&(1<<17) != 0 {
+					rval |= avx512dq
+				}
+				if ebx&(1<<21) != 0 {
+					rval |= avx512ifma
+				}
+				if ebx&(1<<26) != 0 {
+					rval |= avx512pf
+				}
+				if ebx&(1<<27) != 0 {
+					rval |= avx512er
+				}
+				if ebx&(1<<28) != 0 {
+					rval |= avx512cd
+				}
+				if ebx&(1<<30) != 0 {
+					rval |= avx512bw
+				}
+				if ebx&(1<<31) != 0 {
+					rval |= avx512vl
+				}
+				// ecx
+				if ecx&(1<<1) != 0 {
+					rval |= avx512vbmi
+				}
+			}
+		}
+	}
+
+	if maxExtendedFunction() >= 0x80000001 {
+		_, _, c, d := cpuid(0x80000001)
+		if (c & (1 << 5)) != 0 {
+			rval |= lzcnt
+			rval |= popcnt
+		}
+		if (d & (1 << 31)) != 0 {
+			rval |= amd3dnow
+		}
+		if (d & (1 << 30)) != 0 {
+			rval |= amd3dnowext
+		}
+		if (d & (1 << 23)) != 0 {
+			rval |= mmx
+		}
+		if (d & (1 << 22)) != 0 {
+			rval |= mmxext
+		}
+		if (c & (1 << 6)) != 0 {
+			rval |= sse4a
+		}
+		if d&(1<<20) != 0 {
+			rval |= nx
+		}
+		if d&(1<<27) != 0 {
+			rval |= rdtscp
+		}
+
+		/* Allow for selectively disabling SSE2 functions on AMD processors
+		   with SSE2 support but not SSE4a. This includes Athlon64, some
+		   Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster
+		   than SSE2 often enough to utilize this special-case flag.
+		   AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case
+		   so that SSE2 is used unless explicitly disabled by checking
+		   AV_CPU_FLAG_SSE2SLOW. */
+		if vendorID() != intel &&
+			rval&sse2 != 0 && (c&0x00000040) == 0 {
+			rval |= sse2slow
+		}
+
+		/* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
+		 * used unless the OS has AVX support. */
+		if (rval & avx) != 0 {
+			if (c & 0x00000800) != 0 {
+				rval |= xop
+			}
+			if (c & 0x00010000) != 0 {
+				rval |= fma4
+			}
+		}
+
+		if vendorID() == intel {
+			family, model := familyModel()
+			if family == 6 && (model == 9 || model == 13 || model == 14) {
+				/* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
+				 * 6/14 (core1 "yonah") theoretically support sse2, but it's
+				 * usually slower than mmx. */
+				if (rval & sse2) != 0 {
+					rval |= sse2slow
+				}
+				if (rval & sse3) != 0 {
+					rval |= sse3slow
+				}
+			}
+			/* The Atom processor has SSSE3 support, which is useful in many cases,
+			 * but sometimes the SSSE3 version is slower than the SSE2 equivalent
+			 * on the Atom, but is generally faster on other processors supporting
+			 * SSSE3. This flag allows for selectively disabling certain SSSE3
+			 * functions on the Atom. */
+			if family == 6 && model == 28 {
+				rval |= atom
+			}
+		}
+	}
+	return flags(rval)
+}
+
+func valAsString(values ...uint32) []byte {
+	r := make([]byte, 4*len(values))
+	for i, v := range values {
+		dst := r[i*4:]
+		dst[0] = byte(v & 0xff)
+		dst[1] = byte((v >> 8) & 0xff)
+		dst[2] = byte((v >> 16) & 0xff)
+		dst[3] = byte((v >> 24) & 0xff)
+		switch {
+		case dst[0] == 0:
+			return r[:i*4]
+		case dst[1] == 0:
+			return r[:i*4+1]
+		case dst[2] == 0:
+			return r[:i*4+2]
+		case dst[3] == 0:
+			return r[:i*4+3]
+		}
+	}
+	return r
+}

+ 40 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_386.s

@@ -0,0 +1,40 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	XORL CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+4(FP)
+	MOVL BX, ebx+8(FP)
+	MOVL CX, ecx+12(FP)
+	MOVL DX, edx+16(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func xgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+4(FP)
+	MOVL DX, edx+8(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET

+ 40 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_amd64.s

@@ -0,0 +1,40 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuid(SB), 7, $0
+	XORQ CX, CX
+	MOVL op+0(FP), AX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+TEXT ·asmCpuidex(SB), 7, $0
+	MOVL op+0(FP), AX
+	MOVL op2+4(FP), CX
+	CPUID
+	MOVL AX, eax+8(FP)
+	MOVL BX, ebx+12(FP)
+	MOVL CX, ecx+16(FP)
+	MOVL DX, edx+20(FP)
+	RET
+
+// func asmXgetbv(index uint32) (eax, edx uint32)
+TEXT ·asmXgetbv(SB), 7, $0
+	MOVL index+0(FP), CX
+	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
+	MOVL AX, eax+8(FP)
+	MOVL DX, edx+12(FP)
+	RET
+
+// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+TEXT ·asmRdtscpAsm(SB), 7, $0
+	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
+	MOVL AX, eax+0(FP)
+	MOVL BX, ebx+4(FP)
+	MOVL CX, ecx+8(FP)
+	MOVL DX, edx+12(FP)
+	RET

+ 17 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_intel.go

@@ -0,0 +1,17 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build 386 amd64
+
+package cpuid
+
+func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
+func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
+func asmXgetbv(index uint32) (eax, edx uint32)
+func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
+
+func initCPU() {
+	cpuid = asmCpuid
+	cpuidex = asmCpuidex
+	xgetbv = asmXgetbv
+	rdtscpAsm = asmRdtscpAsm
+}

+ 23 - 0
Godeps/_workspace/src/github.com/klauspost/cpuid/private/cpuid_detect_ref.go

@@ -0,0 +1,23 @@
+// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
+
+// +build !amd64,!386
+
+package cpuid
+
+func initCPU() {
+	cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+
+	cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+
+	xgetbv = func(index uint32) (eax, edx uint32) {
+		return 0, 0
+	}
+
+	rdtscpAsm = func() (eax, ebx, ecx, edx uint32) {
+		return 0, 0, 0, 0
+	}
+}

+ 24 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/.gitignore

@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof

+ 11 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/.travis.yml

@@ -0,0 +1,11 @@
+language: go
+
+go:
+  - 1.3
+  - 1.4
+  - 1.5
+  - tip
+
+script: 
+ - go test -v .
+ - go test -v -race .

+ 28 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/LICENSE

@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2015 Klaus Post
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 84 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/README.md

@@ -0,0 +1,84 @@
+# crc32
+CRC32 hash with x64 optimizations
+
+This package is a drop-in replacement for the standard library `hash/crc32` package, that features SSE 4.2 optimizations on x64 platforms, for a 10x speedup.
+
+[![Build Status](https://travis-ci.org/klauspost/crc32.svg?branch=master)](https://travis-ci.org/klauspost/crc32)
+
+# usage
+
+Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
+
+Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
+
+# changes
+
+* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
+
+
+# performance
+
+For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction:
+```
+benchmark            old ns/op     new ns/op     delta
+BenchmarkCrc32KB     99955         10258         -89.74%
+
+benchmark            old MB/s     new MB/s     speedup
+BenchmarkCrc32KB     327.83       3194.20      9.74x
+```
+
+For other tables and "CLMUL"-capable machines the performance is the same as the standard library.
+
+Here are some detailed benchmarks, comparing to the Go 1.5 standard library with and without assembler enabled.
+
+```
+Std:   Standard Go 1.5 library
+Crc:   Indicates IEEE type CRC.
+40B:   Size of each slice encoded.
+NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine).
+Castagnoli: Castagnoli CRC type.
+
+BenchmarkStdCrc40B-4            10000000               158 ns/op         252.88 MB/s
+BenchmarkCrc40BNoAsm-4          20000000               105 ns/op         377.38 MB/s (slice8)
+BenchmarkCrc40B-4               20000000               105 ns/op         378.77 MB/s (slice8)
+
+BenchmarkStdCrc1KB-4              500000              3604 ns/op         284.10 MB/s
+BenchmarkCrc1KBNoAsm-4           1000000              1463 ns/op         699.79 MB/s (slice8)
+BenchmarkCrc1KB-4                3000000               396 ns/op        2583.69 MB/s (asm)
+
+BenchmarkStdCrc8KB-4              200000             11417 ns/op         717.48 MB/s (slice8)
+BenchmarkCrc8KBNoAsm-4            200000             11317 ns/op         723.85 MB/s (slice8)
+BenchmarkCrc8KB-4                 500000              2919 ns/op        2805.73 MB/s (asm)
+
+BenchmarkStdCrc32KB-4              30000             45749 ns/op         716.24 MB/s (slice8)
+BenchmarkCrc32KBNoAsm-4            30000             45109 ns/op         726.42 MB/s (slice8)
+BenchmarkCrc32KB-4                100000             11497 ns/op        2850.09 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnol40B-4 10000000               161 ns/op         246.94 MB/s
+BenchmarkStdCastagnoli40B-4     50000000              28.4 ns/op        1410.69 MB/s (asm)
+BenchmarkCastagnoli40BNoAsm-4   20000000               100 ns/op         398.01 MB/s (slice8)
+BenchmarkCastagnoli40B-4        50000000              28.2 ns/op        1419.54 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli1KB-4  500000              3622 ns/op        282.67 MB/s
+BenchmarkStdCastagnoli1KB-4     10000000               144 ns/op        7099.78 MB/s (asm)
+BenchmarkCastagnoli1KBNoAsm-4    1000000              1475 ns/op         694.14 MB/s (slice8)
+BenchmarkCastagnoli1KB-4        10000000               146 ns/op        6993.35 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli8KB-4  50000              28781 ns/op         284.63 MB/s
+BenchmarkStdCastagnoli8KB-4      1000000              1029 ns/op        7957.89 MB/s (asm)
+BenchmarkCastagnoli8KBNoAsm-4     200000             11410 ns/op         717.94 MB/s (slice8)
+BenchmarkCastagnoli8KB-4         1000000              1000 ns/op        8188.71 MB/s (asm)
+
+BenchmarkStdNoAsmCastagnoli32KB-4  10000            115426 ns/op         283.89 MB/s
+BenchmarkStdCastagnoli32KB-4      300000              4065 ns/op        8059.13 MB/s (asm)
+BenchmarkCastagnoli32KBNoAsm-4     30000             45171 ns/op         725.41 MB/s (slice8)
+BenchmarkCastagnoli32KB-4         500000              4077 ns/op        8035.89 MB/s (asm)
+```
+
+The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
+
+However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
+
+# license
+
+Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.

+ 182 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32.go

@@ -0,0 +1,182 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
+// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
+// information.
+//
+// Polynomials are represented in LSB-first form also known as reversed representation.
+//
+// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
+// for information.
+package crc32
+
+import (
+	"hash"
+	"sync"
+)
+
+// The size of a CRC-32 checksum in bytes.
+const Size = 4
+
+// Predefined polynomials.
+const (
+	// IEEE is by far and away the most common CRC-32 polynomial.
+	// Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
+	IEEE = 0xedb88320
+
+	// Castagnoli's polynomial, used in iSCSI.
+	// Has better error detection characteristics than IEEE.
+	// http://dx.doi.org/10.1109/26.231911
+	Castagnoli = 0x82f63b78
+
+	// Koopman's polynomial.
+	// Also has better error detection characteristics than IEEE.
+	// http://dx.doi.org/10.1109/DSN.2002.1028931
+	Koopman = 0xeb31d82e
+)
+
+// Table is a 256-word table representing the polynomial for efficient processing.
+type Table [256]uint32
+
+// castagnoliTable points to a lazily initialized Table for the Castagnoli
+// polynomial. MakeTable will always return this value when asked to make a
+// Castagnoli table so we can compare against it to find when the caller is
+// using this polynomial.
+var castagnoliTable *Table
+var castagnoliTable8 *slicing8Table
+var castagnoliOnce sync.Once
+
+func castagnoliInit() {
+	castagnoliTable = makeTable(Castagnoli)
+	castagnoliTable8 = makeTable8(Castagnoli)
+}
+
+// IEEETable is the table for the IEEE polynomial.
+var IEEETable = makeTable(IEEE)
+
+// slicing8Table is array of 8 Tables
+type slicing8Table [8]Table
+
+// iEEETable8 is the slicing8Table for IEEE
+var iEEETable8 *slicing8Table
+var iEEETable8Once sync.Once
+
+// MakeTable returns the Table constructed from the specified polynomial.
+func MakeTable(poly uint32) *Table {
+	switch poly {
+	case IEEE:
+		return IEEETable
+	case Castagnoli:
+		castagnoliOnce.Do(castagnoliInit)
+		return castagnoliTable
+	}
+	return makeTable(poly)
+}
+
+// makeTable returns the Table constructed from the specified polynomial.
+func makeTable(poly uint32) *Table {
+	t := new(Table)
+	for i := 0; i < 256; i++ {
+		crc := uint32(i)
+		for j := 0; j < 8; j++ {
+			if crc&1 == 1 {
+				crc = (crc >> 1) ^ poly
+			} else {
+				crc >>= 1
+			}
+		}
+		t[i] = crc
+	}
+	return t
+}
+
+// makeTable8 returns slicing8Table constructed from the specified polynomial.
+func makeTable8(poly uint32) *slicing8Table {
+	t := new(slicing8Table)
+	t[0] = *makeTable(poly)
+	for i := 0; i < 256; i++ {
+		crc := t[0][i]
+		for j := 1; j < 8; j++ {
+			crc = t[0][crc&0xFF] ^ (crc >> 8)
+			t[j][i] = crc
+		}
+	}
+	return t
+}
+
+// digest represents the partial evaluation of a checksum.
+type digest struct {
+	crc uint32
+	tab *Table
+}
+
+// New creates a new hash.Hash32 computing the CRC-32 checksum
+// using the polynomial represented by the Table.
+func New(tab *Table) hash.Hash32 { return &digest{0, tab} }
+
+// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
+// using the IEEE polynomial.
+func NewIEEE() hash.Hash32 { return New(IEEETable) }
+
+func (d *digest) Size() int { return Size }
+
+func (d *digest) BlockSize() int { return 1 }
+
+func (d *digest) Reset() { d.crc = 0 }
+
+func update(crc uint32, tab *Table, p []byte) uint32 {
+	crc = ^crc
+	for _, v := range p {
+		crc = tab[byte(crc)^v] ^ (crc >> 8)
+	}
+	return ^crc
+}
+
+// updateSlicingBy8 updates CRC using Slicing-by-8
+func updateSlicingBy8(crc uint32, tab *slicing8Table, p []byte) uint32 {
+	crc = ^crc
+	for len(p) > 8 {
+		crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
+		crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
+			tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
+			tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
+		p = p[8:]
+	}
+	crc = ^crc
+	if len(p) == 0 {
+		return crc
+	}
+	return update(crc, &tab[0], p)
+}
+
+// Update returns the result of adding the bytes in p to the crc.
+func Update(crc uint32, tab *Table, p []byte) uint32 {
+	if tab == castagnoliTable {
+		return updateCastagnoli(crc, p)
+	} else if tab == IEEETable {
+		return updateIEEE(crc, p)
+	}
+	return update(crc, tab, p)
+}
+
+func (d *digest) Write(p []byte) (n int, err error) {
+	d.crc = Update(d.crc, d.tab, p)
+	return len(p), nil
+}
+
+func (d *digest) Sum32() uint32 { return d.crc }
+
+func (d *digest) Sum(in []byte) []byte {
+	s := d.Sum32()
+	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
+}
+
+// Checksum returns the CRC-32 checksum of data
+// using the polynomial represented by the Table.
+func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
+
+// ChecksumIEEE returns the CRC-32 checksum of data
+// using the IEEE polynomial.
+func ChecksumIEEE(data []byte) uint32 { return updateIEEE(0, data) }

+ 62 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.go

@@ -0,0 +1,62 @@
+//+build !appengine,!gccgo
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crc32
+
+// This file contains the code to call the SSE 4.2 version of the Castagnoli
+// and IEEE CRC.
+
+// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and uses
+// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
+func haveSSE41() bool
+func haveSSE42() bool
+func haveCLMUL() bool
+
+// castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32
+// instruction.
+// go:noescape
+func castagnoliSSE42(crc uint32, p []byte) uint32
+
+// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
+// instruction as well as SSE 4.1.
+// go:noescape
+func ieeeCLMUL(crc uint32, p []byte) uint32
+
+var sse42 = haveSSE42()
+var useFastIEEE = haveCLMUL() && haveSSE41()
+
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+	if sse42 {
+		return castagnoliSSE42(crc, p)
+	}
+	// only use slicing-by-8 when input is >= 16 Bytes
+	if len(p) >= 16 {
+		return updateSlicingBy8(crc, castagnoliTable8, p)
+	}
+	return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+	if useFastIEEE && len(p) >= 64 {
+		left := len(p) & 15
+		do := len(p) - left
+		crc := ^ieeeCLMUL(^crc, p[:do])
+		if left > 0 {
+			crc = update(crc, IEEETable, p[do:])
+		}
+		return crc
+	}
+
+	// only use slicing-by-8 when input is >= 16 Bytes
+	if len(p) >= 16 {
+		iEEETable8Once.Do(func() {
+			iEEETable8 = makeTable8(IEEE)
+		})
+		return updateSlicingBy8(crc, iEEETable8, p)
+	}
+
+	return update(crc, IEEETable, p)
+}

+ 237 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64.s

@@ -0,0 +1,237 @@
+//+build gc
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define NOSPLIT 4
+#define RODATA 8
+
+// func castagnoliSSE42(crc uint32, p []byte) uint32
+TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
+	MOVL crc+0(FP), AX    // CRC value
+	MOVQ p+8(FP), SI      // data pointer
+	MOVQ p_len+16(FP), CX // len(p)
+
+	NOTL AX
+
+	// If there's less than 8 bytes to process, we do it byte-by-byte.
+	CMPQ CX, $8
+	JL   cleanup
+
+	// Process individual bytes until the input is 8-byte aligned.
+startup:
+	MOVQ SI, BX
+	ANDQ $7, BX
+	JZ   aligned
+
+	CRC32B (SI), AX
+	DECQ   CX
+	INCQ   SI
+	JMP    startup
+
+aligned:
+	// The input is now 8-byte aligned and we can process 8-byte chunks.
+	CMPQ CX, $8
+	JL   cleanup
+
+	CRC32Q (SI), AX
+	ADDQ   $8, SI
+	SUBQ   $8, CX
+	JMP    aligned
+
+cleanup:
+	// We may have some bytes left over that we process one at a time.
+	CMPQ CX, $0
+	JE   done
+
+	CRC32B (SI), AX
+	INCQ   SI
+	DECQ   CX
+	JMP    cleanup
+
+done:
+	NOTL AX
+	MOVL AX, ret+32(FP)
+	RET
+
+// func haveSSE42() bool
+TEXT ·haveSSE42(SB), NOSPLIT, $0
+	XORQ AX, AX
+	INCL AX
+	CPUID
+	SHRQ $20, CX
+	ANDQ $1, CX
+	MOVB CX, ret+0(FP)
+	RET
+
+// func haveCLMUL() bool
+TEXT ·haveCLMUL(SB), NOSPLIT, $0
+	XORQ AX, AX
+	INCL AX
+	CPUID
+	SHRQ $1, CX
+	ANDQ $1, CX
+	MOVB CX, ret+0(FP)
+	RET
+
+// func haveSSE41() bool
+TEXT ·haveSSE41(SB), NOSPLIT, $0
+	XORQ AX, AX
+	INCL AX
+	CPUID
+	SHRQ $19, CX
+	ANDQ $1, CX
+	MOVB CX, ret+0(FP)
+	RET
+
+// CRC32 polynomial data
+//
+// These constants are lifted from the
+// Linux kernel, since they avoid the costly
+// PSHUFB 16 byte reversal proposed in the
+// original Intel paper.
+DATA r2r1kp<>+0(SB)/8, $0x154442bd4
+DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
+DATA r4r3kp<>+0(SB)/8, $0x1751997d0
+DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
+DATA rupolykp<>+0(SB)/8, $0x1db710641
+DATA rupolykp<>+8(SB)/8, $0x1f7011641
+DATA r5kp<>+0(SB)/8, $0x163cd6124
+
+GLOBL r2r1kp<>(SB), RODATA, $16
+GLOBL r4r3kp<>(SB), RODATA, $16
+GLOBL rupolykp<>(SB), RODATA, $16
+GLOBL r5kp<>(SB), RODATA, $8
+
+// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
+// len(p) must be at least 64, and must be a multiple of 16.
+
+// func ieeeCLMUL(crc uint32, p []byte) uint32
+TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
+	MOVL crc+0(FP), X0    // Initial CRC value
+	MOVQ p+8(FP), SI      // data pointer
+	MOVQ p_len+16(FP), CX // len(p)
+
+	MOVOU (SI), X1
+	MOVOU 16(SI), X2
+	MOVOU 32(SI), X3
+	MOVOU 48(SI), X4
+	PXOR  X0, X1
+	ADDQ  $64, SI    // buf+=64
+	SUBQ  $64, CX    // len-=64
+	CMPQ  CX, $64    // Less than 64 bytes left
+	JB    remain64
+
+	MOVOU r2r1kp<>+0(SB), X0
+
+loopback64:
+	MOVOA X1, X5
+	MOVOA X2, X6
+	MOVOA X3, X7
+	MOVOA X4, X8
+
+	PCLMULQDQ $0, X0, X1
+	PCLMULQDQ $0, X0, X2
+	PCLMULQDQ $0, X0, X3
+	PCLMULQDQ $0, X0, X4
+
+	// Load next early
+	MOVOU (SI), X11
+	MOVOU 16(SI), X12
+	MOVOU 32(SI), X13
+	MOVOU 48(SI), X14
+
+	PCLMULQDQ $0x11, X0, X5
+	PCLMULQDQ $0x11, X0, X6
+	PCLMULQDQ $0x11, X0, X7
+	PCLMULQDQ $0x11, X0, X8
+
+	PXOR X5, X1
+	PXOR X6, X2
+	PXOR X7, X3
+	PXOR X8, X4
+
+	PXOR X11, X1
+	PXOR X12, X2
+	PXOR X13, X3
+	PXOR X14, X4
+
+	ADDQ $0x40, DI
+	ADDQ $64, SI    // buf+=64
+	SUBQ $64, CX    // len-=64
+	CMPQ CX, $64    // Less than 64 bytes left?
+	JGE  loopback64
+
+	// Fold result into a single register (X1)
+remain64:
+	MOVOU r4r3kp<>+0(SB), X0
+
+	MOVOA     X1, X5
+	PCLMULQDQ $0, X0, X1
+	PCLMULQDQ $0x11, X0, X5
+	PXOR      X5, X1
+	PXOR      X2, X1
+
+	MOVOA     X1, X5
+	PCLMULQDQ $0, X0, X1
+	PCLMULQDQ $0x11, X0, X5
+	PXOR      X5, X1
+	PXOR      X3, X1
+
+	MOVOA     X1, X5
+	PCLMULQDQ $0, X0, X1
+	PCLMULQDQ $0x11, X0, X5
+	PXOR      X5, X1
+	PXOR      X4, X1
+
+	// More than 16 bytes left?
+	CMPQ CX, $16
+	JB   finish
+
+	// Encode 16 bytes
+remain16:
+	MOVOU     (SI), X10
+	MOVOA     X1, X5
+	PCLMULQDQ $0, X0, X1
+	PCLMULQDQ $0x11, X0, X5
+	PXOR      X5, X1
+	PXOR      X10, X1
+	SUBQ      $16, CX
+	ADDQ      $16, SI
+	CMPQ      CX, $16
+	JGE       remain16
+
+finish:
+	// Fold final result into 32 bits and return it
+	PCMPEQB   X3, X3
+	PCLMULQDQ $1, X1, X0
+	PSRLDQ    $8, X1
+	PXOR      X0, X1
+
+	MOVOA X1, X2
+	MOVQ  r5kp<>+0(SB), X0
+
+	// Creates 32 bit mask. Note that we don't care about upper half.
+	PSRLQ $32, X3
+
+	PSRLDQ    $4, X2
+	PAND      X3, X1
+	PCLMULQDQ $0, X0, X1
+	PXOR      X2, X1
+
+	MOVOU rupolykp<>+0(SB), X0
+
+	MOVOA     X1, X2
+	PAND      X3, X1
+	PCLMULQDQ $0x10, X0, X1
+	PAND      X3, X1
+	PCLMULQDQ $0, X0, X1
+	PXOR      X2, X1
+
+	// PEXTRD   $1, X1, AX  (SSE 4.1)
+	BYTE $0x66; BYTE $0x0f; BYTE $0x3a
+	BYTE $0x16; BYTE $0xc8; BYTE $0x01
+	MOVL AX, ret+32(FP)
+
+	RET

+ 39 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.go

@@ -0,0 +1,39 @@
+//+build !appengine,!gccgo
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package crc32
+
+// This file contains the code to call the SSE 4.2 version of the Castagnoli
+// CRC.
+
+// haveSSE42 is defined in crc_amd64p32.s and uses CPUID to test for 4.2
+// support.
+func haveSSE42() bool
+
+// castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32
+// instruction.
+func castagnoliSSE42(crc uint32, p []byte) uint32
+
+var sse42 = haveSSE42()
+
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+	if sse42 {
+		return castagnoliSSE42(crc, p)
+	}
+	return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+	// only use slicing-by-8 when input is >= 4KB
+	if len(p) >= 4096 {
+		iEEETable8Once.Do(func() {
+			iEEETable8 = makeTable8(IEEE)
+		})
+		return updateSlicingBy8(crc, iEEETable8, p)
+	}
+
+	return update(crc, IEEETable, p)
+}

+ 67 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32_amd64p32.s

@@ -0,0 +1,67 @@
+//+build gc
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#define NOSPLIT 4
+#define RODATA 8
+
+// func castagnoliSSE42(crc uint32, p []byte) uint32
+TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
+	MOVL crc+0(FP), AX   // CRC value
+	MOVL p+4(FP), SI     // data pointer
+	MOVL p_len+8(FP), CX // len(p)
+
+	NOTL AX
+
+	// If there's less than 8 bytes to process, we do it byte-by-byte.
+	CMPQ CX, $8
+	JL   cleanup
+
+	// Process individual bytes until the input is 8-byte aligned.
+startup:
+	MOVQ SI, BX
+	ANDQ $7, BX
+	JZ   aligned
+
+	CRC32B (SI), AX
+	DECQ   CX
+	INCQ   SI
+	JMP    startup
+
+aligned:
+	// The input is now 8-byte aligned and we can process 8-byte chunks.
+	CMPQ CX, $8
+	JL   cleanup
+
+	CRC32Q (SI), AX
+	ADDQ   $8, SI
+	SUBQ   $8, CX
+	JMP    aligned
+
+cleanup:
+	// We may have some bytes left over that we process one at a time.
+	CMPQ CX, $0
+	JE   done
+
+	CRC32B (SI), AX
+	INCQ   SI
+	DECQ   CX
+	JMP    cleanup
+
+done:
+	NOTL AX
+	MOVL AX, ret+16(FP)
+	RET
+
+// func haveSSE42() bool
+TEXT ·haveSSE42(SB), NOSPLIT, $0
+	XORQ AX, AX
+	INCL AX
+	CPUID
+	SHRQ $20, CX
+	ANDQ $1, CX
+	MOVB CX, ret+0(FP)
+	RET
+

+ 28 - 0
Godeps/_workspace/src/github.com/klauspost/crc32/crc32_generic.go

@@ -0,0 +1,28 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 arm arm64 ppc64 ppc64le appengine gccgo
+
+package crc32
+
+// The file contains the generic version of updateCastagnoli which does
+// slicing-by-8, or uses the fallback for very small sizes.
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+	// only use slicing-by-8 when input is >= 16 Bytes
+	if len(p) >= 16 {
+		return updateSlicingBy8(crc, castagnoliTable8, p)
+	}
+	return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+	// only use slicing-by-8 when input is >= 16 Bytes
+	if len(p) >= 16 {
+		iEEETable8Once.Do(func() {
+			iEEETable8 = makeTable8(IEEE)
+		})
+		return updateSlicingBy8(crc, iEEETable8, p)
+	}
+	return update(crc, IEEETable, p)
+}

+ 0 - 21
Godeps/_workspace/src/github.com/macaron-contrib/binding/README.md

@@ -1,21 +0,0 @@
-binding [![Build Status](https://drone.io/github.com/macaron-contrib/binding/status.png)](https://drone.io/github.com/macaron-contrib/binding/latest) [![](http://gocover.io/_badge/github.com/macaron-contrib/binding)](http://gocover.io/github.com/macaron-contrib/binding)
-=======
-
-Middlware binding provides request data binding and validation for [Macaron](https://github.com/Unknwon/macaron).
-
-### Installation
-
-	go get github.com/macaron-contrib/binding
-	
-## Getting Help
-
-- [API Reference](https://gowalker.org/github.com/macaron-contrib/binding)
-- [Documentation](http://macaron.gogs.io/docs/middlewares/binding)
-
-## Credits
-
-This package is forked from [martini-contrib/binding](https://github.com/martini-contrib/binding) with modifications.
-
-## License
-
-This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.

+ 0 - 57
Godeps/_workspace/src/github.com/macaron-contrib/binding/bind_test.go

@@ -1,57 +0,0 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package binding
-
-import (
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_Bind(t *testing.T) {
-	Convey("Bind test", t, func() {
-		Convey("Bind form", func() {
-			for _, testCase := range formTestCases {
-				performFormTest(t, Bind, testCase)
-			}
-		})
-
-		Convey("Bind JSON", func() {
-			for _, testCase := range jsonTestCases {
-				performJsonTest(t, Bind, testCase)
-			}
-		})
-
-		Convey("Bind multipart form", func() {
-			for _, testCase := range multipartFormTestCases {
-				performMultipartFormTest(t, Bind, testCase)
-			}
-		})
-
-		Convey("Bind with file", func() {
-			for _, testCase := range fileTestCases {
-				performFileTest(t, Bind, testCase)
-				performFileTest(t, BindIgnErr, testCase)
-			}
-		})
-	})
-}
-
-func Test_Version(t *testing.T) {
-	Convey("Get package version", t, func() {
-		So(Version(), ShouldEqual, _VERSION)
-	})
-}

+ 0 - 115
Godeps/_workspace/src/github.com/macaron-contrib/binding/common_test.go

@@ -1,115 +0,0 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package binding
-
-import (
-	"mime/multipart"
-
-	"github.com/Unknwon/macaron"
-)
-
-// These types are mostly contrived examples, but they're used
-// across many test cases. The idea is to cover all the scenarios
-// that this binding package might encounter in actual use.
-type (
-	// For basic test cases with a required field
-	Post struct {
-		Title   string `form:"title" json:"title" binding:"Required"`
-		Content string `form:"content" json:"content"`
-	}
-
-	// To be used as a nested struct (with a required field)
-	Person struct {
-		Name  string `form:"name" json:"name" binding:"Required"`
-		Email string `form:"email" json:"email"`
-	}
-
-	// For advanced test cases: multiple values, embedded
-	// and nested structs, an ignored field, and single
-	// and multiple file uploads
-	BlogPost struct {
-		Post
-		Id          int     `binding:"Required"` // JSON not specified here for test coverage
-		Ignored     string  `form:"-" json:"-"`
-		Ratings     []int   `form:"rating" json:"ratings"`
-		Author      Person  `json:"author"`
-		Coauthor    *Person `json:"coauthor"`
-		HeaderImage *multipart.FileHeader
-		Pictures    []*multipart.FileHeader `form:"picture"`
-		unexported  string                  `form:"unexported"`
-	}
-
-	EmbedPerson struct {
-		*Person
-	}
-
-	SadForm struct {
-		AlphaDash    string   `form:"AlphaDash" binding:"AlphaDash"`
-		AlphaDashDot string   `form:"AlphaDashDot" binding:"AlphaDashDot"`
-		MinSize      string   `form:"MinSize" binding:"MinSize(5)"`
-		MinSizeSlice []string `form:"MinSizeSlice" binding:"MinSize(5)"`
-		MaxSize      string   `form:"MaxSize" binding:"MaxSize(1)"`
-		MaxSizeSlice []string `form:"MaxSizeSlice" binding:"MaxSize(1)"`
-		Range        int      `form:"Range" binding:"Range(1,2)"`
-		RangeInvalid int      `form:"RangeInvalid" binding:"Range(1)"`
-		Email        string   `binding:"Email"`
-		Url          string   `form:"Url" binding:"Url"`
-		UrlEmpty     string   `form:"UrlEmpty" binding:"Url"`
-		In           string   `form:"In" binding:"Default(0);In(1,2,3)"`
-		InInvalid    string   `form:"InInvalid" binding:"In(1,2,3)"`
-		NotIn        string   `form:"NotIn" binding:"NotIn(1,2,3)"`
-		Include      string   `form:"Include" binding:"Include(a)"`
-		Exclude      string   `form:"Exclude" binding:"Exclude(a)"`
-	}
-
-	CustomErrorHandle struct {
-		Rule `binding:"CustomRule"`
-	}
-
-	// The common function signature of the handlers going under test.
-	handlerFunc func(interface{}, ...interface{}) macaron.Handler
-
-	// Used for testing mapping an interface to the context
-	// If used (withInterface = true in the testCases), a modeler
-	// should be mapped to the context as well as BlogPost, meaning
-	// you can receive a modeler in your application instead of a
-	// concrete BlogPost.
-	modeler interface {
-		Model() string
-	}
-)
-
-func (p Post) Validate(ctx *macaron.Context, errs Errors) Errors {
-	if len(p.Title) < 10 {
-		errs = append(errs, Error{
-			FieldNames:     []string{"title"},
-			Classification: "LengthError",
-			Message:        "Life is too short",
-		})
-	}
-	return errs
-}
-
-func (p Post) Model() string {
-	return p.Title
-}
-
-func (_ CustomErrorHandle) Error(_ *macaron.Context, _ Errors) {}
-
-const (
-	testRoute       = "/test"
-	formContentType = "application/x-www-form-urlencoded"
-)

+ 0 - 162
Godeps/_workspace/src/github.com/macaron-contrib/binding/errorhandler_test.go

@@ -1,162 +0,0 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package binding
-
-import (
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-var errorTestCases = []errorTestCase{
-	{
-		description: "No errors",
-		errors:      Errors{},
-		expected: errorTestResult{
-			statusCode: http.StatusOK,
-		},
-	},
-	{
-		description: "Deserialization error",
-		errors: Errors{
-			{
-				Classification: ERR_DESERIALIZATION,
-				Message:        "Some parser error here",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  http.StatusBadRequest,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"classification":"DeserializationError","message":"Some parser error here"}]`,
-		},
-	},
-	{
-		description: "Content-Type error",
-		errors: Errors{
-			{
-				Classification: ERR_CONTENT_TYPE,
-				Message:        "Empty Content-Type",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  http.StatusUnsupportedMediaType,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"classification":"ContentTypeError","message":"Empty Content-Type"}]`,
-		},
-	},
-	{
-		description: "Requirement error",
-		errors: Errors{
-			{
-				FieldNames:     []string{"some_field"},
-				Classification: ERR_REQUIRED,
-				Message:        "Required",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  STATUS_UNPROCESSABLE_ENTITY,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"fieldNames":["some_field"],"classification":"RequiredError","message":"Required"}]`,
-		},
-	},
-	{
-		description: "Bad header error",
-		errors: Errors{
-			{
-				Classification: "HeaderError",
-				Message:        "The X-Something header must be specified",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  STATUS_UNPROCESSABLE_ENTITY,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"classification":"HeaderError","message":"The X-Something header must be specified"}]`,
-		},
-	},
-	{
-		description: "Custom field error",
-		errors: Errors{
-			{
-				FieldNames:     []string{"month", "year"},
-				Classification: "DateError",
-				Message:        "The month and year must be in the future",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  STATUS_UNPROCESSABLE_ENTITY,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"fieldNames":["month","year"],"classification":"DateError","message":"The month and year must be in the future"}]`,
-		},
-	},
-	{
-		description: "Multiple errors",
-		errors: Errors{
-			{
-				FieldNames:     []string{"foo"},
-				Classification: ERR_REQUIRED,
-				Message:        "Required",
-			},
-			{
-				FieldNames:     []string{"foo"},
-				Classification: "LengthError",
-				Message:        "The length of the 'foo' field is too short",
-			},
-		},
-		expected: errorTestResult{
-			statusCode:  STATUS_UNPROCESSABLE_ENTITY,
-			contentType: _JSON_CONTENT_TYPE,
-			body:        `[{"fieldNames":["foo"],"classification":"RequiredError","message":"Required"},{"fieldNames":["foo"],"classification":"LengthError","message":"The length of the 'foo' field is too short"}]`,
-		},
-	},
-}
-
-func Test_ErrorHandler(t *testing.T) {
-	Convey("Error handler", t, func() {
-		for _, testCase := range errorTestCases {
-			performErrorTest(t, testCase)
-		}
-	})
-}
-
-func performErrorTest(t *testing.T, testCase errorTestCase) {
-	resp := httptest.NewRecorder()
-
-	errorHandler(testCase.errors, resp)
-
-	So(resp.Code, ShouldEqual, testCase.expected.statusCode)
-	So(resp.Header().Get("Content-Type"), ShouldEqual, testCase.expected.contentType)
-
-	actualBody, err := ioutil.ReadAll(resp.Body)
-	So(err, ShouldBeNil)
-	So(string(actualBody), ShouldEqual, testCase.expected.body)
-}
-
-type (
-	errorTestCase struct {
-		description string
-		errors      Errors
-		expected    errorTestResult
-	}
-
-	errorTestResult struct {
-		statusCode  int
-		contentType string
-		body        string
-	}
-)

+ 0 - 115
Godeps/_workspace/src/github.com/macaron-contrib/binding/errors_test.go

@@ -1,115 +0,0 @@
-// Copyright 2014 martini-contrib/binding Authors
-// Copyright 2014 Unknwon
-//
-// Licensed under the Apache License, Version 2.0 (the "License"): you may
-// not use this file except in compliance with the License. You may obtain
-// a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations
-// under the License.
-
-package binding
-
-import (
-	"fmt"
-	"testing"
-
-	. "github.com/smartystreets/goconvey/convey"
-)
-
-func Test_ErrorsAdd(t *testing.T) {
-	Convey("Add new error", t, func() {
-		var actual Errors
-		expected := Errors{
-			Error{
-				FieldNames:     []string{"Field1", "Field2"},
-				Classification: "ErrorClass",
-				Message:        "Some message",
-			},
-		}
-
-		actual.Add(expected[0].FieldNames, expected[0].Classification, expected[0].Message)
-
-		So(len(actual), ShouldEqual, 1)
-		So(fmt.Sprintf("%#v", actual), ShouldEqual, fmt.Sprintf("%#v", expected))
-	})
-}
-
-func Test_ErrorsLen(t *testing.T) {
-	Convey("Get number of errors", t, func() {
-		So(errorsTestSet.Len(), ShouldEqual, len(errorsTestSet))
-	})
-}
-
-func Test_ErrorsHas(t *testing.T) {
-	Convey("Check error class", t, func() {
-		So(errorsTestSet.Has("ClassA"), ShouldBeTrue)
-		So(errorsTestSet.Has("ClassQ"), ShouldBeFalse)
-	})
-}
-
-func Test_ErrorGetters(t *testing.T) {
-	Convey("Get error detail", t, func() {
-		err := Error{
-			FieldNames:     []string{"field1", "field2"},
-			Classification: "ErrorClass",
-			Message:        "The message",
-		}
-
-		fieldsActual := err.Fields()
-
-		So(len(fieldsActual), ShouldEqual, 2)
-		So(fieldsActual[0], ShouldEqual, "field1")
-		So(fieldsActual[1], ShouldEqual, "field2")
-
-		So(err.Kind(), ShouldEqual, "ErrorClass")
-		So(err.Error(), ShouldEqual, "The message")
-	})
-}
-
-/*
-func TestErrorsWithClass(t *testing.T) {
-	expected := Errors{
-		errorsTestSet[0],
-		errorsTestSet[3],
-	}
-	actualStr := fmt.Sprintf("%#v", errorsTestSet.WithClass("ClassA"))
-	expectedStr := fmt.Sprintf("%#v", expected)
-	if actualStr != expectedStr {
-		t.Errorf("Expected:\n%s\nbut got:\n%s", expectedStr, actualStr)
-	}
-}
-*/
-
-var errorsTestSet = Errors{
-	Error{
-		FieldNames:     []string{},
-		Classification: "ClassA",
-		Message:        "Foobar",
-	},
-	Error{
-		FieldNames:     []string{},
-		Classification: "ClassB",
-		Message:        "Foo",
-	},
-	Error{
-		FieldNames:     []string{"field1", "field2"},
-		Classification: "ClassB",
-		Message:        "Foobar",
-	},
-	Error{
-		FieldNames:     []string{"field2"},
-		Classification: "ClassA",
-		Message:        "Foobar",
-	},
-	Error{
-		FieldNames:     []string{"field2"},
-		Classification: "ClassB",
-		Message:        "Foobar",
-	},
-}

Some files were not shown because too many files changed in this diff