sample_test.go

// includes code from
// https://raw.githubusercontent.com/rcrowley/go-metrics/master/sample.go
// Copyright 2012 Richard Crowley. All rights reserved.

package metrics

import (
	"math/rand"
	"runtime"
	"testing"
	"time"
)

// Benchmark{Compute,Copy}{1000,1000000} demonstrate that, even for relatively
// expensive computations like Variance, the cost of copying the Sample, as
// approximated by a make and copy, is much greater than the cost of the
// computation for small samples and only slightly less for large samples.
func BenchmarkCompute1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCompute1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		SampleVariance(s)
	}
}

func BenchmarkCopy1000(b *testing.B) {
	s := make([]int64, 1000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}

func BenchmarkCopy1000000(b *testing.B) {
	s := make([]int64, 1000000)
	for i := 0; i < len(s); i++ {
		s[i] = int64(i)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		sCopy := make([]int64, len(s))
		copy(sCopy, s)
	}
}

func BenchmarkExpDecaySample257(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(257, 0.015))
}

func BenchmarkExpDecaySample514(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(514, 0.015))
}

func BenchmarkExpDecaySample1028(b *testing.B) {
	benchmarkSample(b, NewExpDecaySample(1028, 0.015))
}

func BenchmarkUniformSample257(b *testing.B) {
	benchmarkSample(b, NewUniformSample(257))
}

func BenchmarkUniformSample514(b *testing.B) {
	benchmarkSample(b, NewUniformSample(514))
}

func BenchmarkUniformSample1028(b *testing.B) {
	benchmarkSample(b, NewUniformSample(1028))
}

func TestExpDecaySample10(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 10; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 10 != size {
		t.Errorf("s.Count(): 10 != %v\n", size)
	}
	if size := s.Size(); 10 != size {
		t.Errorf("s.Size(): 10 != %v\n", size)
	}
	if l := len(s.Values()); 10 != l {
		t.Errorf("len(s.Values()): 10 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 10 || v < 0 {
			t.Errorf("out of range [0, 10): %v\n", v)
		}
	}
}

func TestExpDecaySample100(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(1000, 0.01)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 100 != size {
		t.Errorf("s.Count(): 100 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 100 || v < 0 {
			t.Errorf("out of range [0, 100): %v\n", v)
		}
	}
}

func TestExpDecaySample1000(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

// This test makes sure that the sample's priority is not amplified by using
// nanosecond duration since start rather than second duration since start.
// The priority becomes +Inf quickly after starting if this is done,
// effectively freezing the set of samples until a rescale step happens.
func TestExpDecaySampleNanosecondRegression(t *testing.T) {
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 0; i < 100; i++ {
		s.Update(10)
	}
	time.Sleep(1 * time.Millisecond)
	for i := 0; i < 100; i++ {
		s.Update(20)
	}
	v := s.Values()
	avg := float64(0)
	for i := 0; i < len(v); i++ {
		avg += float64(v[i])
	}
	avg /= float64(len(v))
	if avg > 16 || avg < 14 {
		t.Errorf("out of range [14, 16]: %v\n", avg)
	}
}

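// TestExpDecaySampleRescale spaces two updates more than an hour apart to
// force a rescale step and then checks that no stored key has decayed to zero.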
func TestExpDecaySampleRescale(t *testing.T) {
	s := NewExpDecaySample(2, 0.001).(*ExpDecaySample)
	s.update(time.Now(), 1)
	s.update(time.Now().Add(time.Hour+time.Microsecond), 1)
	for _, v := range s.values.Values() {
		if v.k == 0.0 {
			t.Fatal("v.k == 0.0")
		}
	}
}

func TestExpDecaySampleSnapshot(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testExpDecaySampleStatistics(t, snapshot)
}

func TestExpDecaySampleStatistics(t *testing.T) {
	now := time.Now()
	rand.Seed(1)
	s := NewExpDecaySample(100, 0.99)
	for i := 1; i <= 10000; i++ {
		s.(*ExpDecaySample).update(now.Add(time.Duration(i)), int64(i))
	}
	testExpDecaySampleStatistics(t, s)
}

func TestUniformSample(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 0; i < 1000; i++ {
		s.Update(int64(i))
	}
	if size := s.Count(); 1000 != size {
		t.Errorf("s.Count(): 1000 != %v\n", size)
	}
	if size := s.Size(); 100 != size {
		t.Errorf("s.Size(): 100 != %v\n", size)
	}
	if l := len(s.Values()); 100 != l {
		t.Errorf("len(s.Values()): 100 != %v\n", l)
	}
	for _, v := range s.Values() {
		if v > 1000 || v < 0 {
			t.Errorf("out of range [0, 1000): %v\n", v)
		}
	}
}

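// TestUniformSampleIncludesTail checks that when exactly as many values are
// inserted as the reservoir holds, every value is retained: the sum of the
// sampled values must equal the sum of the inputs.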
func TestUniformSampleIncludesTail(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	max := 100
	for i := 0; i < max; i++ {
		s.Update(int64(i))
	}
	v := s.Values()
	sum := 0
	exp := (max - 1) * max / 2
	for i := 0; i < len(v); i++ {
		sum += int(v[i])
	}
	if exp != sum {
		t.Errorf("sum: %v != %v\n", exp, sum)
	}
}

func TestUniformSampleSnapshot(t *testing.T) {
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	snapshot := s.Snapshot()
	s.Update(1)
	testUniformSampleStatistics(t, snapshot)
}

func TestUniformSampleStatistics(t *testing.T) {
	rand.Seed(1)
	s := NewUniformSample(100)
	for i := 1; i <= 10000; i++ {
		s.Update(int64(i))
	}
	testUniformSampleStatistics(t, s)
}

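// benchmarkSample drives b.N Update calls against the given Sample and logs
// the garbage-collection pause time accumulated during the run, per operation.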
func benchmarkSample(b *testing.B, s Sample) {
	var memStats runtime.MemStats
	runtime.ReadMemStats(&memStats)
	pauseTotalNs := memStats.PauseTotalNs
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.Update(1)
	}
	b.StopTimer()
	runtime.GC()
	runtime.ReadMemStats(&memStats)
	b.Logf("GC cost: %d ns/op", int(memStats.PauseTotalNs-pauseTotalNs)/b.N)
}

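// testExpDecaySampleStatistics asserts the exact statistics expected after the
// calling tests above feed the values 1..10000 into a seeded (rand.Seed(1))
// exponentially-decaying sample of size 100.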
func testExpDecaySampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 107 != min {
		t.Errorf("s.Min(): 107 != %v\n", min)
	}
	if max := s.Max(); 10000 != max {
		t.Errorf("s.Max(): 10000 != %v\n", max)
	}
	if mean := s.Mean(); 4965.98 != mean {
		t.Errorf("s.Mean(): 4965.98 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2959.825156930727 != stdDev {
		t.Errorf("s.StdDev(): 2959.825156930727 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4615 != ps[0] {
		t.Errorf("median: 4615 != %v\n", ps[0])
	}
	if 7672 != ps[1] {
		t.Errorf("75th percentile: 7672 != %v\n", ps[1])
	}
	if 9998.99 != ps[2] {
		t.Errorf("99th percentile: 9998.99 != %v\n", ps[2])
	}
}

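// testUniformSampleStatistics asserts the exact statistics expected after the
// calling tests feed the values 1..10000 into a uniform sample of size 100;
// the expected numbers depend on the math/rand state those tests set up.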
func testUniformSampleStatistics(t *testing.T, s Sample) {
	if count := s.Count(); 10000 != count {
		t.Errorf("s.Count(): 10000 != %v\n", count)
	}
	if min := s.Min(); 37 != min {
		t.Errorf("s.Min(): 37 != %v\n", min)
	}
	if max := s.Max(); 9989 != max {
		t.Errorf("s.Max(): 9989 != %v\n", max)
	}
	if mean := s.Mean(); 4748.14 != mean {
		t.Errorf("s.Mean(): 4748.14 != %v\n", mean)
	}
	if stdDev := s.StdDev(); 2826.684117548333 != stdDev {
		t.Errorf("s.StdDev(): 2826.684117548333 != %v\n", stdDev)
	}
	ps := s.Percentiles([]float64{0.5, 0.75, 0.99})
	if 4599 != ps[0] {
		t.Errorf("median: 4599 != %v\n", ps[0])
	}
	if 7380.5 != ps[1] {
		t.Errorf("75th percentile: 7380.5 != %v\n", ps[1])
	}
	if 9986.429999999998 != ps[2] {
		t.Errorf("99th percentile: 9986.429999999998 != %v\n", ps[2])
	}
}

// TestUniformSampleConcurrentUpdateCount would expose data race problems with
// concurrent Update and Count calls on Sample when the test is run with the
// -race flag.
func TestUniformSampleConcurrentUpdateCount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in short mode")
	}
	s := NewUniformSample(100)
	for i := 0; i < 100; i++ {
		s.Update(int64(i))
	}
	quit := make(chan struct{})
	go func() {
		t := time.NewTicker(10 * time.Millisecond)
		for {
			select {
			case <-t.C:
				s.Update(rand.Int63())
			case <-quit:
				t.Stop()
				return
			}
		}
	}()
	for i := 0; i < 1000; i++ {
		s.Count()
		time.Sleep(5 * time.Millisecond)
	}
	quit <- struct{}{}
}