// time_series_query_test.go
  1. package elasticsearch
  2. import (
  3. "fmt"
  4. "testing"
  5. "time"
  6. "github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
  7. "github.com/grafana/grafana/pkg/components/simplejson"
  8. "github.com/grafana/grafana/pkg/tsdb"
  9. . "github.com/smartystreets/goconvey/convey"
  10. )
  11. func TestExecuteTimeSeriesQuery(t *testing.T) {
  12. from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
  13. to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
  14. fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
  15. toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
  16. Convey("Test execute time series query", t, func() {
  17. Convey("With defaults on es 2", func() {
  18. c := newFakeClient(2)
  19. _, err := executeTsdbQuery(c, `{
  20. "timeField": "@timestamp",
  21. "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
  22. "metrics": [{"type": "count", "id": "0" }]
  23. }`, from, to, 15*time.Second)
  24. So(err, ShouldBeNil)
  25. sr := c.multisearchRequests[0].Requests[0]
  26. rangeFilter := sr.Query.Bool.Filters[0].(*es.RangeFilter)
  27. So(rangeFilter.Key, ShouldEqual, c.timeField)
  28. So(rangeFilter.Lte, ShouldEqual, toStr)
  29. So(rangeFilter.Gte, ShouldEqual, fromStr)
  30. So(rangeFilter.Format, ShouldEqual, es.DateFormatEpochMS)
  31. So(sr.Aggs[0].Key, ShouldEqual, "2")
  32. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
  33. So(dateHistogramAgg.Field, ShouldEqual, "@timestamp")
  34. So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromStr)
  35. So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toStr)
  36. })
  37. Convey("With defaults on es 5", func() {
  38. c := newFakeClient(5)
  39. _, err := executeTsdbQuery(c, `{
  40. "timeField": "@timestamp",
  41. "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
  42. "metrics": [{"type": "count", "id": "0" }]
  43. }`, from, to, 15*time.Second)
  44. So(err, ShouldBeNil)
  45. sr := c.multisearchRequests[0].Requests[0]
  46. So(sr.Query.Bool.Filters[0].(*es.RangeFilter).Key, ShouldEqual, c.timeField)
  47. So(sr.Aggs[0].Key, ShouldEqual, "2")
  48. So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromStr)
  49. So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toStr)
  50. })
  51. Convey("With multiple bucket aggs", func() {
  52. c := newFakeClient(5)
  53. _, err := executeTsdbQuery(c, `{
  54. "timeField": "@timestamp",
  55. "bucketAggs": [
  56. { "type": "terms", "field": "@host", "id": "2", "settings": { "size": "0", "order": "asc" } },
  57. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  58. ],
  59. "metrics": [{"type": "count", "id": "1" }]
  60. }`, from, to, 15*time.Second)
  61. So(err, ShouldBeNil)
  62. sr := c.multisearchRequests[0].Requests[0]
  63. firstLevel := sr.Aggs[0]
  64. So(firstLevel.Key, ShouldEqual, "2")
  65. termsAgg := firstLevel.Aggregation.Aggregation.(*es.TermsAggregation)
  66. So(termsAgg.Field, ShouldEqual, "@host")
  67. So(termsAgg.Size, ShouldEqual, 500)
  68. secondLevel := firstLevel.Aggregation.Aggs[0]
  69. So(secondLevel.Key, ShouldEqual, "3")
  70. So(secondLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  71. })
  72. Convey("With select field", func() {
  73. c := newFakeClient(5)
  74. _, err := executeTsdbQuery(c, `{
  75. "timeField": "@timestamp",
  76. "bucketAggs": [
  77. { "type": "date_histogram", "field": "@timestamp", "id": "2" }
  78. ],
  79. "metrics": [{"type": "avg", "field": "@value", "id": "1" }]
  80. }`, from, to, 15*time.Second)
  81. So(err, ShouldBeNil)
  82. sr := c.multisearchRequests[0].Requests[0]
  83. firstLevel := sr.Aggs[0]
  84. So(firstLevel.Key, ShouldEqual, "2")
  85. So(firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  86. secondLevel := firstLevel.Aggregation.Aggs[0]
  87. So(secondLevel.Key, ShouldEqual, "1")
  88. So(secondLevel.Aggregation.Type, ShouldEqual, "avg")
  89. So(secondLevel.Aggregation.Aggregation.(*es.MetricAggregation).Field, ShouldEqual, "@value")
  90. })
  91. Convey("With term agg and order by metric agg", func() {
  92. c := newFakeClient(5)
  93. _, err := executeTsdbQuery(c, `{
  94. "timeField": "@timestamp",
  95. "bucketAggs": [
  96. {
  97. "type": "terms",
  98. "field": "@host",
  99. "id": "2",
  100. "settings": { "size": "5", "order": "asc", "orderBy": "5" }
  101. },
  102. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  103. ],
  104. "metrics": [
  105. {"type": "count", "id": "1" },
  106. {"type": "avg", "field": "@value", "id": "5" }
  107. ]
  108. }`, from, to, 15*time.Second)
  109. So(err, ShouldBeNil)
  110. sr := c.multisearchRequests[0].Requests[0]
  111. avgAggOrderBy := sr.Aggs[0].Aggregation.Aggs[0]
  112. So(avgAggOrderBy.Key, ShouldEqual, "5")
  113. So(avgAggOrderBy.Aggregation.Type, ShouldEqual, "avg")
  114. avgAgg := sr.Aggs[0].Aggregation.Aggs[1].Aggregation.Aggs[0]
  115. So(avgAgg.Key, ShouldEqual, "5")
  116. So(avgAgg.Aggregation.Type, ShouldEqual, "avg")
  117. })
  118. Convey("With metric percentiles", func() {
  119. c := newFakeClient(5)
  120. _, err := executeTsdbQuery(c, `{
  121. "timeField": "@timestamp",
  122. "bucketAggs": [
  123. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  124. ],
  125. "metrics": [
  126. {
  127. "id": "1",
  128. "type": "percentiles",
  129. "field": "@load_time",
  130. "settings": {
  131. "percents": [ "1", "2", "3", "4" ]
  132. }
  133. }
  134. ]
  135. }`, from, to, 15*time.Second)
  136. So(err, ShouldBeNil)
  137. sr := c.multisearchRequests[0].Requests[0]
  138. percentilesAgg := sr.Aggs[0].Aggregation.Aggs[0]
  139. So(percentilesAgg.Key, ShouldEqual, "1")
  140. So(percentilesAgg.Aggregation.Type, ShouldEqual, "percentiles")
  141. metricAgg := percentilesAgg.Aggregation.Aggregation.(*es.MetricAggregation)
  142. percents := metricAgg.Settings["percents"].([]interface{})
  143. So(percents, ShouldHaveLength, 4)
  144. So(percents[0], ShouldEqual, "1")
  145. So(percents[1], ShouldEqual, "2")
  146. So(percents[2], ShouldEqual, "3")
  147. So(percents[3], ShouldEqual, "4")
  148. })
  149. Convey("With filters aggs on es 2", func() {
  150. c := newFakeClient(2)
  151. _, err := executeTsdbQuery(c, `{
  152. "timeField": "@timestamp",
  153. "bucketAggs": [
  154. {
  155. "id": "2",
  156. "type": "filters",
  157. "settings": {
  158. "filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
  159. }
  160. },
  161. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  162. ],
  163. "metrics": [{"type": "count", "id": "1" }]
  164. }`, from, to, 15*time.Second)
  165. So(err, ShouldBeNil)
  166. sr := c.multisearchRequests[0].Requests[0]
  167. filtersAgg := sr.Aggs[0]
  168. So(filtersAgg.Key, ShouldEqual, "2")
  169. So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
  170. fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
  171. So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
  172. So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
  173. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
  174. So(dateHistogramAgg.Key, ShouldEqual, "4")
  175. So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  176. })
  177. Convey("With filters aggs on es 5", func() {
  178. c := newFakeClient(5)
  179. _, err := executeTsdbQuery(c, `{
  180. "timeField": "@timestamp",
  181. "bucketAggs": [
  182. {
  183. "id": "2",
  184. "type": "filters",
  185. "settings": {
  186. "filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
  187. }
  188. },
  189. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  190. ],
  191. "metrics": [{"type": "count", "id": "1" }]
  192. }`, from, to, 15*time.Second)
  193. So(err, ShouldBeNil)
  194. sr := c.multisearchRequests[0].Requests[0]
  195. filtersAgg := sr.Aggs[0]
  196. So(filtersAgg.Key, ShouldEqual, "2")
  197. So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
  198. fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
  199. So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
  200. So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
  201. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
  202. So(dateHistogramAgg.Key, ShouldEqual, "4")
  203. So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  204. })
  205. Convey("With raw document metric", func() {
  206. c := newFakeClient(5)
  207. _, err := executeTsdbQuery(c, `{
  208. "timeField": "@timestamp",
  209. "bucketAggs": [],
  210. "metrics": [{ "id": "1", "type": "raw_document", "settings": {} }]
  211. }`, from, to, 15*time.Second)
  212. So(err, ShouldBeNil)
  213. sr := c.multisearchRequests[0].Requests[0]
  214. So(sr.Size, ShouldEqual, 500)
  215. })
  216. Convey("With raw document metric size set", func() {
  217. c := newFakeClient(5)
  218. _, err := executeTsdbQuery(c, `{
  219. "timeField": "@timestamp",
  220. "bucketAggs": [],
  221. "metrics": [{ "id": "1", "type": "raw_document", "settings": { "size": 1337 } }]
  222. }`, from, to, 15*time.Second)
  223. So(err, ShouldBeNil)
  224. sr := c.multisearchRequests[0].Requests[0]
  225. So(sr.Size, ShouldEqual, 1337)
  226. })
  227. Convey("With date histogram agg", func() {
  228. c := newFakeClient(5)
  229. _, err := executeTsdbQuery(c, `{
  230. "timeField": "@timestamp",
  231. "bucketAggs": [
  232. {
  233. "id": "2",
  234. "type": "date_histogram",
  235. "field": "@timestamp",
  236. "settings": { "interval": "auto", "min_doc_count": 2 }
  237. }
  238. ],
  239. "metrics": [{"type": "count", "id": "1" }]
  240. }`, from, to, 15*time.Second)
  241. So(err, ShouldBeNil)
  242. sr := c.multisearchRequests[0].Requests[0]
  243. firstLevel := sr.Aggs[0]
  244. So(firstLevel.Key, ShouldEqual, "2")
  245. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  246. hAgg := firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg)
  247. So(hAgg.Field, ShouldEqual, "@timestamp")
  248. So(hAgg.Interval, ShouldEqual, "$__interval")
  249. So(hAgg.MinDocCount, ShouldEqual, 2)
  250. })
  251. Convey("With histogram agg", func() {
  252. c := newFakeClient(5)
  253. _, err := executeTsdbQuery(c, `{
  254. "timeField": "@timestamp",
  255. "bucketAggs": [
  256. {
  257. "id": "3",
  258. "type": "histogram",
  259. "field": "bytes",
  260. "settings": { "interval": 10, "min_doc_count": 2, "missing": 5 }
  261. }
  262. ],
  263. "metrics": [{"type": "count", "id": "1" }]
  264. }`, from, to, 15*time.Second)
  265. So(err, ShouldBeNil)
  266. sr := c.multisearchRequests[0].Requests[0]
  267. firstLevel := sr.Aggs[0]
  268. So(firstLevel.Key, ShouldEqual, "3")
  269. So(firstLevel.Aggregation.Type, ShouldEqual, "histogram")
  270. hAgg := firstLevel.Aggregation.Aggregation.(*es.HistogramAgg)
  271. So(hAgg.Field, ShouldEqual, "bytes")
  272. So(hAgg.Interval, ShouldEqual, 10)
  273. So(hAgg.MinDocCount, ShouldEqual, 2)
  274. So(*hAgg.Missing, ShouldEqual, 5)
  275. })
  276. Convey("With geo hash grid agg", func() {
  277. c := newFakeClient(5)
  278. _, err := executeTsdbQuery(c, `{
  279. "timeField": "@timestamp",
  280. "bucketAggs": [
  281. {
  282. "id": "3",
  283. "type": "geohash_grid",
  284. "field": "@location",
  285. "settings": { "precision": 3 }
  286. }
  287. ],
  288. "metrics": [{"type": "count", "id": "1" }]
  289. }`, from, to, 15*time.Second)
  290. So(err, ShouldBeNil)
  291. sr := c.multisearchRequests[0].Requests[0]
  292. firstLevel := sr.Aggs[0]
  293. So(firstLevel.Key, ShouldEqual, "3")
  294. So(firstLevel.Aggregation.Type, ShouldEqual, "geohash_grid")
  295. ghGridAgg := firstLevel.Aggregation.Aggregation.(*es.GeoHashGridAggregation)
  296. So(ghGridAgg.Field, ShouldEqual, "@location")
  297. So(ghGridAgg.Precision, ShouldEqual, 3)
  298. })
  299. Convey("With moving average", func() {
  300. c := newFakeClient(5)
  301. _, err := executeTsdbQuery(c, `{
  302. "timeField": "@timestamp",
  303. "bucketAggs": [
  304. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  305. ],
  306. "metrics": [
  307. { "id": "3", "type": "sum", "field": "@value" },
  308. {
  309. "id": "2",
  310. "type": "moving_avg",
  311. "field": "3",
  312. "pipelineAgg": "3"
  313. }
  314. ]
  315. }`, from, to, 15*time.Second)
  316. So(err, ShouldBeNil)
  317. sr := c.multisearchRequests[0].Requests[0]
  318. firstLevel := sr.Aggs[0]
  319. So(firstLevel.Key, ShouldEqual, "4")
  320. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  321. So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
  322. sumAgg := firstLevel.Aggregation.Aggs[0]
  323. So(sumAgg.Key, ShouldEqual, "3")
  324. So(sumAgg.Aggregation.Type, ShouldEqual, "sum")
  325. mAgg := sumAgg.Aggregation.Aggregation.(*es.MetricAggregation)
  326. So(mAgg.Field, ShouldEqual, "@value")
  327. movingAvgAgg := firstLevel.Aggregation.Aggs[1]
  328. So(movingAvgAgg.Key, ShouldEqual, "2")
  329. So(movingAvgAgg.Aggregation.Type, ShouldEqual, "moving_avg")
  330. pl := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  331. So(pl.BucketPath, ShouldEqual, "3")
  332. })
  333. Convey("With broken moving average", func() {
  334. c := newFakeClient(5)
  335. _, err := executeTsdbQuery(c, `{
  336. "timeField": "@timestamp",
  337. "bucketAggs": [
  338. { "type": "date_histogram", "field": "@timestamp", "id": "5" }
  339. ],
  340. "metrics": [
  341. { "id": "3", "type": "sum", "field": "@value" },
  342. {
  343. "id": "2",
  344. "type": "moving_avg",
  345. "pipelineAgg": "3"
  346. },
  347. {
  348. "id": "4",
  349. "type": "moving_avg",
  350. "pipelineAgg": "Metric to apply moving average"
  351. }
  352. ]
  353. }`, from, to, 15*time.Second)
  354. So(err, ShouldBeNil)
  355. sr := c.multisearchRequests[0].Requests[0]
  356. firstLevel := sr.Aggs[0]
  357. So(firstLevel.Key, ShouldEqual, "5")
  358. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  359. So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
  360. movingAvgAgg := firstLevel.Aggregation.Aggs[1]
  361. So(movingAvgAgg.Key, ShouldEqual, "2")
  362. plAgg := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  363. So(plAgg.BucketPath, ShouldEqual, "3")
  364. })
  365. Convey("With derivative", func() {
  366. c := newFakeClient(5)
  367. _, err := executeTsdbQuery(c, `{
  368. "timeField": "@timestamp",
  369. "bucketAggs": [
  370. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  371. ],
  372. "metrics": [
  373. { "id": "3", "type": "sum", "field": "@value" },
  374. {
  375. "id": "2",
  376. "type": "derivative",
  377. "pipelineAgg": "3"
  378. }
  379. ]
  380. }`, from, to, 15*time.Second)
  381. So(err, ShouldBeNil)
  382. sr := c.multisearchRequests[0].Requests[0]
  383. firstLevel := sr.Aggs[0]
  384. So(firstLevel.Key, ShouldEqual, "4")
  385. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  386. derivativeAgg := firstLevel.Aggregation.Aggs[1]
  387. So(derivativeAgg.Key, ShouldEqual, "2")
  388. plAgg := derivativeAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  389. So(plAgg.BucketPath, ShouldEqual, "3")
  390. })
  391. })
  392. }
  393. type fakeClient struct {
  394. version int
  395. timeField string
  396. multiSearchResponse *es.MultiSearchResponse
  397. multiSearchError error
  398. builder *es.MultiSearchRequestBuilder
  399. multisearchRequests []*es.MultiSearchRequest
  400. }
  401. func newFakeClient(version int) *fakeClient {
  402. return &fakeClient{
  403. version: version,
  404. timeField: "@timestamp",
  405. multisearchRequests: make([]*es.MultiSearchRequest, 0),
  406. multiSearchResponse: &es.MultiSearchResponse{},
  407. }
  408. }
  409. func (c *fakeClient) GetVersion() int {
  410. return c.version
  411. }
  412. func (c *fakeClient) GetTimeField() string {
  413. return c.timeField
  414. }
  415. func (c *fakeClient) GetMinInterval(queryInterval string) (time.Duration, error) {
  416. return 15 * time.Second, nil
  417. }
  418. func (c *fakeClient) ExecuteMultisearch(r *es.MultiSearchRequest) (*es.MultiSearchResponse, error) {
  419. c.multisearchRequests = append(c.multisearchRequests, r)
  420. return c.multiSearchResponse, c.multiSearchError
  421. }
  422. func (c *fakeClient) MultiSearch() *es.MultiSearchRequestBuilder {
  423. c.builder = es.NewMultiSearchRequestBuilder(c.version)
  424. return c.builder
  425. }
  426. func newTsdbQuery(body string) (*tsdb.TsdbQuery, error) {
  427. json, err := simplejson.NewJson([]byte(body))
  428. if err != nil {
  429. return nil, err
  430. }
  431. return &tsdb.TsdbQuery{
  432. Queries: []*tsdb.Query{
  433. {
  434. Model: json,
  435. },
  436. },
  437. }, nil
  438. }
  439. func executeTsdbQuery(c es.Client, body string, from, to time.Time, minInterval time.Duration) (*tsdb.Response, error) {
  440. json, err := simplejson.NewJson([]byte(body))
  441. if err != nil {
  442. return nil, err
  443. }
  444. fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
  445. toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
  446. tsdbQuery := &tsdb.TsdbQuery{
  447. Queries: []*tsdb.Query{
  448. {
  449. Model: json,
  450. },
  451. },
  452. TimeRange: tsdb.NewTimeRange(fromStr, toStr),
  453. }
  454. query := newTimeSeriesQuery(c, tsdbQuery, tsdb.NewIntervalCalculator(&tsdb.IntervalOptions{MinInterval: minInterval}))
  455. return query.execute()
  456. }
  457. func TestTimeSeriesQueryParser(t *testing.T) {
  458. Convey("Test time series query parser", t, func() {
  459. p := newTimeSeriesQueryParser()
  460. Convey("Should be able to parse query", func() {
  461. body := `{
  462. "timeField": "@timestamp",
  463. "query": "@metric:cpu",
  464. "alias": "{{@hostname}} {{metric}}",
  465. "metrics": [
  466. {
  467. "field": "@value",
  468. "id": "1",
  469. "meta": {},
  470. "settings": {
  471. "percents": [
  472. "90"
  473. ]
  474. },
  475. "type": "percentiles"
  476. },
  477. {
  478. "type": "count",
  479. "field": "select field",
  480. "id": "4",
  481. "settings": {},
  482. "meta": {}
  483. }
  484. ],
  485. "bucketAggs": [
  486. {
  487. "fake": true,
  488. "field": "@hostname",
  489. "id": "3",
  490. "settings": {
  491. "min_doc_count": 1,
  492. "order": "desc",
  493. "orderBy": "_term",
  494. "size": "10"
  495. },
  496. "type": "terms"
  497. },
  498. {
  499. "field": "@timestamp",
  500. "id": "2",
  501. "settings": {
  502. "interval": "5m",
  503. "min_doc_count": 0,
  504. "trimEdges": 0
  505. },
  506. "type": "date_histogram"
  507. }
  508. ]
  509. }`
  510. tsdbQuery, err := newTsdbQuery(body)
  511. So(err, ShouldBeNil)
  512. queries, err := p.parse(tsdbQuery)
  513. So(err, ShouldBeNil)
  514. So(queries, ShouldHaveLength, 1)
  515. q := queries[0]
  516. So(q.TimeField, ShouldEqual, "@timestamp")
  517. So(q.RawQuery, ShouldEqual, "@metric:cpu")
  518. So(q.Alias, ShouldEqual, "{{@hostname}} {{metric}}")
  519. So(q.Metrics, ShouldHaveLength, 2)
  520. So(q.Metrics[0].Field, ShouldEqual, "@value")
  521. So(q.Metrics[0].ID, ShouldEqual, "1")
  522. So(q.Metrics[0].Type, ShouldEqual, "percentiles")
  523. So(q.Metrics[0].Hide, ShouldBeFalse)
  524. So(q.Metrics[0].PipelineAggregate, ShouldEqual, "")
  525. So(q.Metrics[0].Settings.Get("percents").MustStringArray()[0], ShouldEqual, "90")
  526. So(q.Metrics[1].Field, ShouldEqual, "select field")
  527. So(q.Metrics[1].ID, ShouldEqual, "4")
  528. So(q.Metrics[1].Type, ShouldEqual, "count")
  529. So(q.Metrics[1].Hide, ShouldBeFalse)
  530. So(q.Metrics[1].PipelineAggregate, ShouldEqual, "")
  531. So(q.Metrics[1].Settings.MustMap(), ShouldBeEmpty)
  532. So(q.BucketAggs, ShouldHaveLength, 2)
  533. So(q.BucketAggs[0].Field, ShouldEqual, "@hostname")
  534. So(q.BucketAggs[0].ID, ShouldEqual, "3")
  535. So(q.BucketAggs[0].Type, ShouldEqual, "terms")
  536. So(q.BucketAggs[0].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 1)
  537. So(q.BucketAggs[0].Settings.Get("order").MustString(), ShouldEqual, "desc")
  538. So(q.BucketAggs[0].Settings.Get("orderBy").MustString(), ShouldEqual, "_term")
  539. So(q.BucketAggs[0].Settings.Get("size").MustString(), ShouldEqual, "10")
  540. So(q.BucketAggs[1].Field, ShouldEqual, "@timestamp")
  541. So(q.BucketAggs[1].ID, ShouldEqual, "2")
  542. So(q.BucketAggs[1].Type, ShouldEqual, "date_histogram")
  543. So(q.BucketAggs[1].Settings.Get("interval").MustString(), ShouldEqual, "5m")
  544. So(q.BucketAggs[1].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 0)
  545. So(q.BucketAggs[1].Settings.Get("trimEdges").MustInt64(), ShouldEqual, 0)
  546. })
  547. })
  548. }