// time_series_query_test.go
  1. package elasticsearch
  2. import (
  3. "fmt"
  4. "testing"
  5. "time"
  6. "github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
  7. "github.com/grafana/grafana/pkg/components/simplejson"
  8. "github.com/grafana/grafana/pkg/tsdb"
  9. . "github.com/smartystreets/goconvey/convey"
  10. )
  11. func TestExecuteTimeSeriesQuery(t *testing.T) {
  12. from := time.Date(2018, 5, 15, 17, 50, 0, 0, time.UTC)
  13. to := time.Date(2018, 5, 15, 17, 55, 0, 0, time.UTC)
  14. fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
  15. toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
  16. Convey("Test execute time series query", t, func() {
  17. Convey("With defaults on es 2", func() {
  18. c := newFakeClient(2)
  19. _, err := executeTsdbQuery(c, `{
  20. "timeField": "@timestamp",
  21. "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
  22. "metrics": [{"type": "count", "id": "0" }]
  23. }`, from, to, 15*time.Second)
  24. So(err, ShouldBeNil)
  25. sr := c.multisearchRequests[0].Requests[0]
  26. rangeFilter := sr.Query.Bool.Filters[0].(*es.RangeFilter)
  27. So(rangeFilter.Key, ShouldEqual, c.timeField)
  28. So(rangeFilter.Lte, ShouldEqual, toStr)
  29. So(rangeFilter.Gte, ShouldEqual, fromStr)
  30. So(rangeFilter.Format, ShouldEqual, es.DateFormatEpochMS)
  31. So(sr.Aggs[0].Key, ShouldEqual, "2")
  32. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg)
  33. So(dateHistogramAgg.Field, ShouldEqual, "@timestamp")
  34. So(dateHistogramAgg.ExtendedBounds.Min, ShouldEqual, fromStr)
  35. So(dateHistogramAgg.ExtendedBounds.Max, ShouldEqual, toStr)
  36. })
  37. Convey("With defaults on es 5", func() {
  38. c := newFakeClient(5)
  39. _, err := executeTsdbQuery(c, `{
  40. "timeField": "@timestamp",
  41. "bucketAggs": [{ "type": "date_histogram", "field": "@timestamp", "id": "2" }],
  42. "metrics": [{"type": "count", "id": "0" }]
  43. }`, from, to, 15*time.Second)
  44. So(err, ShouldBeNil)
  45. sr := c.multisearchRequests[0].Requests[0]
  46. So(sr.Query.Bool.Filters[0].(*es.RangeFilter).Key, ShouldEqual, c.timeField)
  47. So(sr.Aggs[0].Key, ShouldEqual, "2")
  48. So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Min, ShouldEqual, fromStr)
  49. So(sr.Aggs[0].Aggregation.Aggregation.(*es.DateHistogramAgg).ExtendedBounds.Max, ShouldEqual, toStr)
  50. })
  51. Convey("With multiple bucket aggs", func() {
  52. c := newFakeClient(5)
  53. _, err := executeTsdbQuery(c, `{
  54. "timeField": "@timestamp",
  55. "bucketAggs": [
  56. { "type": "terms", "field": "@host", "id": "2" },
  57. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  58. ],
  59. "metrics": [{"type": "count", "id": "1" }]
  60. }`, from, to, 15*time.Second)
  61. So(err, ShouldBeNil)
  62. sr := c.multisearchRequests[0].Requests[0]
  63. firstLevel := sr.Aggs[0]
  64. So(firstLevel.Key, ShouldEqual, "2")
  65. So(firstLevel.Aggregation.Aggregation.(*es.TermsAggregation).Field, ShouldEqual, "@host")
  66. secondLevel := firstLevel.Aggregation.Aggs[0]
  67. So(secondLevel.Key, ShouldEqual, "3")
  68. So(secondLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  69. })
  70. Convey("With select field", func() {
  71. c := newFakeClient(5)
  72. _, err := executeTsdbQuery(c, `{
  73. "timeField": "@timestamp",
  74. "bucketAggs": [
  75. { "type": "date_histogram", "field": "@timestamp", "id": "2" }
  76. ],
  77. "metrics": [{"type": "avg", "field": "@value", "id": "1" }]
  78. }`, from, to, 15*time.Second)
  79. So(err, ShouldBeNil)
  80. sr := c.multisearchRequests[0].Requests[0]
  81. firstLevel := sr.Aggs[0]
  82. So(firstLevel.Key, ShouldEqual, "2")
  83. So(firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  84. secondLevel := firstLevel.Aggregation.Aggs[0]
  85. So(secondLevel.Key, ShouldEqual, "1")
  86. So(secondLevel.Aggregation.Type, ShouldEqual, "avg")
  87. So(secondLevel.Aggregation.Aggregation.(*es.MetricAggregation).Field, ShouldEqual, "@value")
  88. })
  89. Convey("With term agg and order by metric agg", func() {
  90. c := newFakeClient(5)
  91. _, err := executeTsdbQuery(c, `{
  92. "timeField": "@timestamp",
  93. "bucketAggs": [
  94. {
  95. "type": "terms",
  96. "field": "@host",
  97. "id": "2",
  98. "settings": { "size": "5", "order": "asc", "orderBy": "5" }
  99. },
  100. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  101. ],
  102. "metrics": [
  103. {"type": "count", "id": "1" },
  104. {"type": "avg", "field": "@value", "id": "5" }
  105. ]
  106. }`, from, to, 15*time.Second)
  107. So(err, ShouldBeNil)
  108. sr := c.multisearchRequests[0].Requests[0]
  109. avgAggOrderBy := sr.Aggs[0].Aggregation.Aggs[0]
  110. So(avgAggOrderBy.Key, ShouldEqual, "5")
  111. So(avgAggOrderBy.Aggregation.Type, ShouldEqual, "avg")
  112. avgAgg := sr.Aggs[0].Aggregation.Aggs[1].Aggregation.Aggs[0]
  113. So(avgAgg.Key, ShouldEqual, "5")
  114. So(avgAgg.Aggregation.Type, ShouldEqual, "avg")
  115. })
  116. Convey("With metric percentiles", func() {
  117. c := newFakeClient(5)
  118. _, err := executeTsdbQuery(c, `{
  119. "timeField": "@timestamp",
  120. "bucketAggs": [
  121. { "type": "date_histogram", "field": "@timestamp", "id": "3" }
  122. ],
  123. "metrics": [
  124. {
  125. "id": "1",
  126. "type": "percentiles",
  127. "field": "@load_time",
  128. "settings": {
  129. "percents": [ "1", "2", "3", "4" ]
  130. }
  131. }
  132. ]
  133. }`, from, to, 15*time.Second)
  134. So(err, ShouldBeNil)
  135. sr := c.multisearchRequests[0].Requests[0]
  136. percentilesAgg := sr.Aggs[0].Aggregation.Aggs[0]
  137. So(percentilesAgg.Key, ShouldEqual, "1")
  138. So(percentilesAgg.Aggregation.Type, ShouldEqual, "percentiles")
  139. metricAgg := percentilesAgg.Aggregation.Aggregation.(*es.MetricAggregation)
  140. percents := metricAgg.Settings["percents"].([]interface{})
  141. So(percents, ShouldHaveLength, 4)
  142. So(percents[0], ShouldEqual, "1")
  143. So(percents[1], ShouldEqual, "2")
  144. So(percents[2], ShouldEqual, "3")
  145. So(percents[3], ShouldEqual, "4")
  146. })
  147. Convey("With filters aggs on es 2", func() {
  148. c := newFakeClient(2)
  149. _, err := executeTsdbQuery(c, `{
  150. "timeField": "@timestamp",
  151. "bucketAggs": [
  152. {
  153. "id": "2",
  154. "type": "filters",
  155. "settings": {
  156. "filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
  157. }
  158. },
  159. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  160. ],
  161. "metrics": [{"type": "count", "id": "1" }]
  162. }`, from, to, 15*time.Second)
  163. So(err, ShouldBeNil)
  164. sr := c.multisearchRequests[0].Requests[0]
  165. filtersAgg := sr.Aggs[0]
  166. So(filtersAgg.Key, ShouldEqual, "2")
  167. So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
  168. fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
  169. So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
  170. So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
  171. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
  172. So(dateHistogramAgg.Key, ShouldEqual, "4")
  173. So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  174. })
  175. Convey("With filters aggs on es 5", func() {
  176. c := newFakeClient(5)
  177. _, err := executeTsdbQuery(c, `{
  178. "timeField": "@timestamp",
  179. "bucketAggs": [
  180. {
  181. "id": "2",
  182. "type": "filters",
  183. "settings": {
  184. "filters": [ { "query": "@metric:cpu" }, { "query": "@metric:logins.count" } ]
  185. }
  186. },
  187. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  188. ],
  189. "metrics": [{"type": "count", "id": "1" }]
  190. }`, from, to, 15*time.Second)
  191. So(err, ShouldBeNil)
  192. sr := c.multisearchRequests[0].Requests[0]
  193. filtersAgg := sr.Aggs[0]
  194. So(filtersAgg.Key, ShouldEqual, "2")
  195. So(filtersAgg.Aggregation.Type, ShouldEqual, "filters")
  196. fAgg := filtersAgg.Aggregation.Aggregation.(*es.FiltersAggregation)
  197. So(fAgg.Filters["@metric:cpu"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:cpu")
  198. So(fAgg.Filters["@metric:logins.count"].(*es.QueryStringFilter).Query, ShouldEqual, "@metric:logins.count")
  199. dateHistogramAgg := sr.Aggs[0].Aggregation.Aggs[0]
  200. So(dateHistogramAgg.Key, ShouldEqual, "4")
  201. So(dateHistogramAgg.Aggregation.Aggregation.(*es.DateHistogramAgg).Field, ShouldEqual, "@timestamp")
  202. })
  203. Convey("With raw document metric", func() {
  204. c := newFakeClient(5)
  205. _, err := executeTsdbQuery(c, `{
  206. "timeField": "@timestamp",
  207. "bucketAggs": [],
  208. "metrics": [{ "id": "1", "type": "raw_document", "settings": {} }]
  209. }`, from, to, 15*time.Second)
  210. So(err, ShouldBeNil)
  211. sr := c.multisearchRequests[0].Requests[0]
  212. So(sr.Size, ShouldEqual, 500)
  213. })
  214. Convey("With raw document metric size set", func() {
  215. c := newFakeClient(5)
  216. _, err := executeTsdbQuery(c, `{
  217. "timeField": "@timestamp",
  218. "bucketAggs": [],
  219. "metrics": [{ "id": "1", "type": "raw_document", "settings": { "size": 1337 } }]
  220. }`, from, to, 15*time.Second)
  221. So(err, ShouldBeNil)
  222. sr := c.multisearchRequests[0].Requests[0]
  223. So(sr.Size, ShouldEqual, 1337)
  224. })
  225. Convey("With date histogram agg", func() {
  226. c := newFakeClient(5)
  227. _, err := executeTsdbQuery(c, `{
  228. "timeField": "@timestamp",
  229. "bucketAggs": [
  230. {
  231. "id": "2",
  232. "type": "date_histogram",
  233. "field": "@timestamp",
  234. "settings": { "interval": "auto", "min_doc_count": 2 }
  235. }
  236. ],
  237. "metrics": [{"type": "count", "id": "1" }]
  238. }`, from, to, 15*time.Second)
  239. So(err, ShouldBeNil)
  240. sr := c.multisearchRequests[0].Requests[0]
  241. firstLevel := sr.Aggs[0]
  242. So(firstLevel.Key, ShouldEqual, "2")
  243. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  244. hAgg := firstLevel.Aggregation.Aggregation.(*es.DateHistogramAgg)
  245. So(hAgg.Field, ShouldEqual, "@timestamp")
  246. So(hAgg.Interval, ShouldEqual, "$__interval")
  247. So(hAgg.MinDocCount, ShouldEqual, 2)
  248. })
  249. Convey("With histogram agg", func() {
  250. c := newFakeClient(5)
  251. _, err := executeTsdbQuery(c, `{
  252. "timeField": "@timestamp",
  253. "bucketAggs": [
  254. {
  255. "id": "3",
  256. "type": "histogram",
  257. "field": "bytes",
  258. "settings": { "interval": 10, "min_doc_count": 2, "missing": 5 }
  259. }
  260. ],
  261. "metrics": [{"type": "count", "id": "1" }]
  262. }`, from, to, 15*time.Second)
  263. So(err, ShouldBeNil)
  264. sr := c.multisearchRequests[0].Requests[0]
  265. firstLevel := sr.Aggs[0]
  266. So(firstLevel.Key, ShouldEqual, "3")
  267. So(firstLevel.Aggregation.Type, ShouldEqual, "histogram")
  268. hAgg := firstLevel.Aggregation.Aggregation.(*es.HistogramAgg)
  269. So(hAgg.Field, ShouldEqual, "bytes")
  270. So(hAgg.Interval, ShouldEqual, 10)
  271. So(hAgg.MinDocCount, ShouldEqual, 2)
  272. So(*hAgg.Missing, ShouldEqual, 5)
  273. })
  274. Convey("With geo hash grid agg", func() {
  275. c := newFakeClient(5)
  276. _, err := executeTsdbQuery(c, `{
  277. "timeField": "@timestamp",
  278. "bucketAggs": [
  279. {
  280. "id": "3",
  281. "type": "geohash_grid",
  282. "field": "@location",
  283. "settings": { "precision": 3 }
  284. }
  285. ],
  286. "metrics": [{"type": "count", "id": "1" }]
  287. }`, from, to, 15*time.Second)
  288. So(err, ShouldBeNil)
  289. sr := c.multisearchRequests[0].Requests[0]
  290. firstLevel := sr.Aggs[0]
  291. So(firstLevel.Key, ShouldEqual, "3")
  292. So(firstLevel.Aggregation.Type, ShouldEqual, "geohash_grid")
  293. ghGridAgg := firstLevel.Aggregation.Aggregation.(*es.GeoHashGridAggregation)
  294. So(ghGridAgg.Field, ShouldEqual, "@location")
  295. So(ghGridAgg.Precision, ShouldEqual, 3)
  296. })
  297. Convey("With moving average", func() {
  298. c := newFakeClient(5)
  299. _, err := executeTsdbQuery(c, `{
  300. "timeField": "@timestamp",
  301. "bucketAggs": [
  302. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  303. ],
  304. "metrics": [
  305. { "id": "3", "type": "sum", "field": "@value" },
  306. {
  307. "id": "2",
  308. "type": "moving_avg",
  309. "field": "3",
  310. "pipelineAgg": "3"
  311. }
  312. ]
  313. }`, from, to, 15*time.Second)
  314. So(err, ShouldBeNil)
  315. sr := c.multisearchRequests[0].Requests[0]
  316. firstLevel := sr.Aggs[0]
  317. So(firstLevel.Key, ShouldEqual, "4")
  318. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  319. So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
  320. sumAgg := firstLevel.Aggregation.Aggs[0]
  321. So(sumAgg.Key, ShouldEqual, "3")
  322. So(sumAgg.Aggregation.Type, ShouldEqual, "sum")
  323. mAgg := sumAgg.Aggregation.Aggregation.(*es.MetricAggregation)
  324. So(mAgg.Field, ShouldEqual, "@value")
  325. movingAvgAgg := firstLevel.Aggregation.Aggs[1]
  326. So(movingAvgAgg.Key, ShouldEqual, "2")
  327. So(movingAvgAgg.Aggregation.Type, ShouldEqual, "moving_avg")
  328. pl := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  329. So(pl.BucketPath, ShouldEqual, "3")
  330. })
  331. Convey("With broken moving average", func() {
  332. c := newFakeClient(5)
  333. _, err := executeTsdbQuery(c, `{
  334. "timeField": "@timestamp",
  335. "bucketAggs": [
  336. { "type": "date_histogram", "field": "@timestamp", "id": "5" }
  337. ],
  338. "metrics": [
  339. { "id": "3", "type": "sum", "field": "@value" },
  340. {
  341. "id": "2",
  342. "type": "moving_avg",
  343. "pipelineAgg": "3"
  344. },
  345. {
  346. "id": "4",
  347. "type": "moving_avg",
  348. "pipelineAgg": "Metric to apply moving average"
  349. }
  350. ]
  351. }`, from, to, 15*time.Second)
  352. So(err, ShouldBeNil)
  353. sr := c.multisearchRequests[0].Requests[0]
  354. firstLevel := sr.Aggs[0]
  355. So(firstLevel.Key, ShouldEqual, "5")
  356. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  357. So(firstLevel.Aggregation.Aggs, ShouldHaveLength, 2)
  358. movingAvgAgg := firstLevel.Aggregation.Aggs[1]
  359. So(movingAvgAgg.Key, ShouldEqual, "2")
  360. plAgg := movingAvgAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  361. So(plAgg.BucketPath, ShouldEqual, "3")
  362. })
  363. Convey("With derivative", func() {
  364. c := newFakeClient(5)
  365. _, err := executeTsdbQuery(c, `{
  366. "timeField": "@timestamp",
  367. "bucketAggs": [
  368. { "type": "date_histogram", "field": "@timestamp", "id": "4" }
  369. ],
  370. "metrics": [
  371. { "id": "3", "type": "sum", "field": "@value" },
  372. {
  373. "id": "2",
  374. "type": "derivative",
  375. "pipelineAgg": "3"
  376. }
  377. ]
  378. }`, from, to, 15*time.Second)
  379. So(err, ShouldBeNil)
  380. sr := c.multisearchRequests[0].Requests[0]
  381. firstLevel := sr.Aggs[0]
  382. So(firstLevel.Key, ShouldEqual, "4")
  383. So(firstLevel.Aggregation.Type, ShouldEqual, "date_histogram")
  384. derivativeAgg := firstLevel.Aggregation.Aggs[1]
  385. So(derivativeAgg.Key, ShouldEqual, "2")
  386. plAgg := derivativeAgg.Aggregation.Aggregation.(*es.PipelineAggregation)
  387. So(plAgg.BucketPath, ShouldEqual, "3")
  388. })
  389. })
  390. }
  391. type fakeClient struct {
  392. version int
  393. timeField string
  394. multiSearchResponse *es.MultiSearchResponse
  395. multiSearchError error
  396. builder *es.MultiSearchRequestBuilder
  397. multisearchRequests []*es.MultiSearchRequest
  398. }
  399. func newFakeClient(version int) *fakeClient {
  400. return &fakeClient{
  401. version: version,
  402. timeField: "@timestamp",
  403. multisearchRequests: make([]*es.MultiSearchRequest, 0),
  404. multiSearchResponse: &es.MultiSearchResponse{},
  405. }
  406. }
  407. func (c *fakeClient) GetVersion() int {
  408. return c.version
  409. }
  410. func (c *fakeClient) GetTimeField() string {
  411. return c.timeField
  412. }
  413. func (c *fakeClient) GetMinInterval(queryInterval string) (time.Duration, error) {
  414. return 15 * time.Second, nil
  415. }
  416. func (c *fakeClient) ExecuteMultisearch(r *es.MultiSearchRequest) (*es.MultiSearchResponse, error) {
  417. c.multisearchRequests = append(c.multisearchRequests, r)
  418. return c.multiSearchResponse, c.multiSearchError
  419. }
  420. func (c *fakeClient) MultiSearch() *es.MultiSearchRequestBuilder {
  421. c.builder = es.NewMultiSearchRequestBuilder(c.version)
  422. return c.builder
  423. }
  424. func newTsdbQuery(body string) (*tsdb.TsdbQuery, error) {
  425. json, err := simplejson.NewJson([]byte(body))
  426. if err != nil {
  427. return nil, err
  428. }
  429. return &tsdb.TsdbQuery{
  430. Queries: []*tsdb.Query{
  431. {
  432. Model: json,
  433. },
  434. },
  435. }, nil
  436. }
  437. func executeTsdbQuery(c es.Client, body string, from, to time.Time, minInterval time.Duration) (*tsdb.Response, error) {
  438. json, err := simplejson.NewJson([]byte(body))
  439. if err != nil {
  440. return nil, err
  441. }
  442. fromStr := fmt.Sprintf("%d", from.UnixNano()/int64(time.Millisecond))
  443. toStr := fmt.Sprintf("%d", to.UnixNano()/int64(time.Millisecond))
  444. tsdbQuery := &tsdb.TsdbQuery{
  445. Queries: []*tsdb.Query{
  446. {
  447. Model: json,
  448. },
  449. },
  450. TimeRange: tsdb.NewTimeRange(fromStr, toStr),
  451. }
  452. query := newTimeSeriesQuery(c, tsdbQuery, tsdb.NewIntervalCalculator(&tsdb.IntervalOptions{MinInterval: minInterval}))
  453. return query.execute()
  454. }
  455. func TestTimeSeriesQueryParser(t *testing.T) {
  456. Convey("Test time series query parser", t, func() {
  457. p := newTimeSeriesQueryParser()
  458. Convey("Should be able to parse query", func() {
  459. body := `{
  460. "timeField": "@timestamp",
  461. "query": "@metric:cpu",
  462. "alias": "{{@hostname}} {{metric}}",
  463. "metrics": [
  464. {
  465. "field": "@value",
  466. "id": "1",
  467. "meta": {},
  468. "settings": {
  469. "percents": [
  470. "90"
  471. ]
  472. },
  473. "type": "percentiles"
  474. },
  475. {
  476. "type": "count",
  477. "field": "select field",
  478. "id": "4",
  479. "settings": {},
  480. "meta": {}
  481. }
  482. ],
  483. "bucketAggs": [
  484. {
  485. "fake": true,
  486. "field": "@hostname",
  487. "id": "3",
  488. "settings": {
  489. "min_doc_count": 1,
  490. "order": "desc",
  491. "orderBy": "_term",
  492. "size": "10"
  493. },
  494. "type": "terms"
  495. },
  496. {
  497. "field": "@timestamp",
  498. "id": "2",
  499. "settings": {
  500. "interval": "5m",
  501. "min_doc_count": 0,
  502. "trimEdges": 0
  503. },
  504. "type": "date_histogram"
  505. }
  506. ]
  507. }`
  508. tsdbQuery, err := newTsdbQuery(body)
  509. So(err, ShouldBeNil)
  510. queries, err := p.parse(tsdbQuery)
  511. So(err, ShouldBeNil)
  512. So(queries, ShouldHaveLength, 1)
  513. q := queries[0]
  514. So(q.TimeField, ShouldEqual, "@timestamp")
  515. So(q.RawQuery, ShouldEqual, "@metric:cpu")
  516. So(q.Alias, ShouldEqual, "{{@hostname}} {{metric}}")
  517. So(q.Metrics, ShouldHaveLength, 2)
  518. So(q.Metrics[0].Field, ShouldEqual, "@value")
  519. So(q.Metrics[0].ID, ShouldEqual, "1")
  520. So(q.Metrics[0].Type, ShouldEqual, "percentiles")
  521. So(q.Metrics[0].Hide, ShouldBeFalse)
  522. So(q.Metrics[0].PipelineAggregate, ShouldEqual, "")
  523. So(q.Metrics[0].Settings.Get("percents").MustStringArray()[0], ShouldEqual, "90")
  524. So(q.Metrics[1].Field, ShouldEqual, "select field")
  525. So(q.Metrics[1].ID, ShouldEqual, "4")
  526. So(q.Metrics[1].Type, ShouldEqual, "count")
  527. So(q.Metrics[1].Hide, ShouldBeFalse)
  528. So(q.Metrics[1].PipelineAggregate, ShouldEqual, "")
  529. So(q.Metrics[1].Settings.MustMap(), ShouldBeEmpty)
  530. So(q.BucketAggs, ShouldHaveLength, 2)
  531. So(q.BucketAggs[0].Field, ShouldEqual, "@hostname")
  532. So(q.BucketAggs[0].ID, ShouldEqual, "3")
  533. So(q.BucketAggs[0].Type, ShouldEqual, "terms")
  534. So(q.BucketAggs[0].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 1)
  535. So(q.BucketAggs[0].Settings.Get("order").MustString(), ShouldEqual, "desc")
  536. So(q.BucketAggs[0].Settings.Get("orderBy").MustString(), ShouldEqual, "_term")
  537. So(q.BucketAggs[0].Settings.Get("size").MustString(), ShouldEqual, "10")
  538. So(q.BucketAggs[1].Field, ShouldEqual, "@timestamp")
  539. So(q.BucketAggs[1].ID, ShouldEqual, "2")
  540. So(q.BucketAggs[1].Type, ShouldEqual, "date_histogram")
  541. So(q.BucketAggs[1].Settings.Get("interval").MustString(), ShouldEqual, "5m")
  542. So(q.BucketAggs[1].Settings.Get("min_doc_count").MustInt64(), ShouldEqual, 0)
  543. So(q.BucketAggs[1].Settings.Get("trimEdges").MustInt64(), ShouldEqual, 0)
  544. })
  545. })
  546. }