time_series_query.go 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378
  1. package elasticsearch
  2. import (
  3. "fmt"
  4. "strconv"
  5. "github.com/grafana/grafana/pkg/components/simplejson"
  6. "github.com/grafana/grafana/pkg/tsdb"
  7. "github.com/grafana/grafana/pkg/tsdb/elasticsearch/client"
  8. )
// timeSeriesQuery holds everything needed to turn a tsdb query into one or
// more Elasticsearch searches and execute them.
type timeSeriesQuery struct {
	client             es.Client               // Elasticsearch client used to build and send the multisearch request
	tsdbQuery          *tsdb.TsdbQuery         // raw queries plus the panel time range
	intervalCalculator tsdb.IntervalCalculator // resolves the bucket interval from time range + min interval
}
  14. var newTimeSeriesQuery = func(client es.Client, tsdbQuery *tsdb.TsdbQuery, intervalCalculator tsdb.IntervalCalculator) *timeSeriesQuery {
  15. return &timeSeriesQuery{
  16. client: client,
  17. tsdbQuery: tsdbQuery,
  18. intervalCalculator: intervalCalculator,
  19. }
  20. }
// execute builds one search per parsed query, bundles them into a single
// multisearch request, sends it, and parses the responses into time series.
// Queries that are structurally invalid get an error result under their RefID
// instead of failing the whole batch.
func (e *timeSeriesQuery) execute() (*tsdb.Response, error) {
	result := &tsdb.Response{}
	result.Results = make(map[string]*tsdb.QueryResult)

	tsQueryParser := newTimeSeriesQueryParser()
	queries, err := tsQueryParser.parse(e.tsdbQuery)
	if err != nil {
		return nil, err
	}

	ms := e.client.MultiSearch()

	// Panel time range as millisecond-epoch strings, matching DateFormatEpochMS.
	from := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetFromAsMsEpoch())
	to := fmt.Sprintf("%d", e.tsdbQuery.TimeRange.GetToAsMsEpoch())

	for _, q := range queries {
		minInterval, err := e.client.GetMinInterval(q.Interval)
		if err != nil {
			return nil, err
		}
		interval := e.intervalCalculator.Calculate(e.tsdbQuery.TimeRange, minInterval)

		b := ms.Search(interval)
		b.Size(0) // aggregations only; no hits unless raw_document below overrides
		filters := b.Query().Bool().Filter()
		// NOTE(review): arguments are passed as (field, to, from) — presumably
		// the builder signature is (field, lte, gte); confirm against
		// AddDateRangeFilter before reordering.
		filters.AddDateRangeFilter(e.client.GetTimeField(), to, from, es.DateFormatEpochMS)

		if q.RawQuery != "" {
			filters.AddQueryStringFilter(q.RawQuery, true)
		}

		if len(q.BucketAggs) == 0 {
			// Without bucket aggregations, only a raw_document metric is valid.
			if len(q.Metrics) == 0 || q.Metrics[0].Type != "raw_document" {
				result.Results[q.RefID] = &tsdb.QueryResult{
					RefId:       q.RefID,
					Error:       fmt.Errorf("invalid query, missing metrics and aggregations"),
					ErrorString: "invalid query, missing metrics and aggregations",
				}
				continue
			}
			// raw_document: fetch the newest documents themselves.
			metric := q.Metrics[0]
			b.Size(metric.Settings.Get("size").MustInt(500))
			b.SortDesc("@timestamp", "boolean")
			b.AddDocValueField("@timestamp")
			continue
		}

		aggBuilder := b.Agg()

		// Each bucket agg is nested inside the previous one: the builder
		// returned from each addXxxAgg call is the child scope for the next.
		for _, bucketAgg := range q.BucketAggs {
			switch bucketAgg.Type {
			case dateHistType:
				aggBuilder = addDateHistogramAgg(aggBuilder, bucketAgg, from, to)
			case histogramType:
				aggBuilder = addHistogramAgg(aggBuilder, bucketAgg)
			case filtersType:
				aggBuilder = addFiltersAgg(aggBuilder, bucketAgg)
			case termsType:
				aggBuilder = addTermsAgg(aggBuilder, bucketAgg, q.Metrics)
			case geohashGridType:
				aggBuilder = addGeoHashGridAgg(aggBuilder, bucketAgg)
			}
		}

		// Metric aggregations go inside the innermost bucket aggregation.
		for _, m := range q.Metrics {
			if m.Type == countType {
				// count needs no explicit agg; doc counts come with the buckets.
				continue
			}

			if isPipelineAgg(m.Type) {
				if isPipelineAggWithMultipleBucketPaths(m.Type) {
					if len(m.PipelineVariables) > 0 {
						bucketPaths := map[string]interface{}{}
						for name, pipelineAgg := range m.PipelineVariables {
							// Only numeric ids reference sibling metric aggs.
							if _, err := strconv.Atoi(pipelineAgg); err == nil {
								var appliedAgg *MetricAgg
								for _, pipelineMetric := range q.Metrics {
									if pipelineMetric.ID == pipelineAgg {
										appliedAgg = pipelineMetric
										break
									}
								}
								if appliedAgg != nil {
									if appliedAgg.Type == countType {
										// count has no agg of its own; use the
										// built-in _count bucket path.
										bucketPaths[name] = "_count"
									} else {
										bucketPaths[name] = pipelineAgg
									}
								}
							}
						}

						aggBuilder.Pipeline(m.ID, m.Type, bucketPaths, func(a *es.PipelineAggregation) {
							a.Settings = m.Settings.MustMap()
						})
					} else {
						// No pipeline variables configured: nothing to build.
						continue
					}
				} else {
					// Single-bucket-path pipeline agg (e.g. derivative, moving avg).
					if _, err := strconv.Atoi(m.PipelineAggregate); err == nil {
						var appliedAgg *MetricAgg
						for _, pipelineMetric := range q.Metrics {
							if pipelineMetric.ID == m.PipelineAggregate {
								appliedAgg = pipelineMetric
								break
							}
						}
						if appliedAgg != nil {
							bucketPath := m.PipelineAggregate
							if appliedAgg.Type == countType {
								bucketPath = "_count"
							}
							aggBuilder.Pipeline(m.ID, m.Type, bucketPath, func(a *es.PipelineAggregation) {
								a.Settings = m.Settings.MustMap()
							})
						}
					} else {
						// Non-numeric reference: not a valid sibling metric id.
						continue
					}
				}
			} else {
				// Plain metric aggregation (avg, max, percentiles, ...).
				aggBuilder.Metric(m.ID, m.Type, m.Field, func(a *es.MetricAggregation) {
					a.Settings = m.Settings.MustMap()
				})
			}
		}
	}

	req, err := ms.Build()
	if err != nil {
		return nil, err
	}

	res, err := e.client.ExecuteMultisearch(req)
	if err != nil {
		return nil, err
	}

	rp := newResponseParser(res.Responses, queries)
	return rp.getTimeSeries()
}
  148. func addDateHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, timeFrom, timeTo string) es.AggBuilder {
  149. aggBuilder.DateHistogram(bucketAgg.ID, bucketAgg.Field, func(a *es.DateHistogramAgg, b es.AggBuilder) {
  150. a.Interval = bucketAgg.Settings.Get("interval").MustString("auto")
  151. a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
  152. a.ExtendedBounds = &es.ExtendedBounds{Min: timeFrom, Max: timeTo}
  153. a.Format = bucketAgg.Settings.Get("format").MustString(es.DateFormatEpochMS)
  154. if a.Interval == "auto" {
  155. a.Interval = "$__interval"
  156. }
  157. if offset, err := bucketAgg.Settings.Get("offset").String(); err == nil {
  158. a.Offset = offset
  159. }
  160. if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
  161. a.Missing = &missing
  162. }
  163. aggBuilder = b
  164. })
  165. return aggBuilder
  166. }
  167. func addHistogramAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
  168. aggBuilder.Histogram(bucketAgg.ID, bucketAgg.Field, func(a *es.HistogramAgg, b es.AggBuilder) {
  169. a.Interval = bucketAgg.Settings.Get("interval").MustInt(1000)
  170. a.MinDocCount = bucketAgg.Settings.Get("min_doc_count").MustInt(0)
  171. if missing, err := bucketAgg.Settings.Get("missing").Int(); err == nil {
  172. a.Missing = &missing
  173. }
  174. aggBuilder = b
  175. })
  176. return aggBuilder
  177. }
  178. func addTermsAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg, metrics []*MetricAgg) es.AggBuilder {
  179. aggBuilder.Terms(bucketAgg.ID, bucketAgg.Field, func(a *es.TermsAggregation, b es.AggBuilder) {
  180. if size, err := bucketAgg.Settings.Get("size").Int(); err == nil {
  181. a.Size = size
  182. } else if size, err := bucketAgg.Settings.Get("size").String(); err == nil {
  183. a.Size, err = strconv.Atoi(size)
  184. if err != nil {
  185. a.Size = 500
  186. }
  187. } else {
  188. a.Size = 500
  189. }
  190. if a.Size == 0 {
  191. a.Size = 500
  192. }
  193. if minDocCount, err := bucketAgg.Settings.Get("min_doc_count").Int(); err == nil {
  194. a.MinDocCount = &minDocCount
  195. }
  196. if missing, err := bucketAgg.Settings.Get("missing").String(); err == nil {
  197. a.Missing = &missing
  198. }
  199. if orderBy, err := bucketAgg.Settings.Get("orderBy").String(); err == nil {
  200. a.Order[orderBy] = bucketAgg.Settings.Get("order").MustString("desc")
  201. if _, err := strconv.Atoi(orderBy); err == nil {
  202. for _, m := range metrics {
  203. if m.ID == orderBy {
  204. b.Metric(m.ID, m.Type, m.Field, nil)
  205. break
  206. }
  207. }
  208. }
  209. }
  210. aggBuilder = b
  211. })
  212. return aggBuilder
  213. }
  214. func addFiltersAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
  215. filters := make(map[string]interface{})
  216. for _, filter := range bucketAgg.Settings.Get("filters").MustArray() {
  217. json := simplejson.NewFromAny(filter)
  218. query := json.Get("query").MustString()
  219. label := json.Get("label").MustString()
  220. if label == "" {
  221. label = query
  222. }
  223. filters[label] = &es.QueryStringFilter{Query: query, AnalyzeWildcard: true}
  224. }
  225. if len(filters) > 0 {
  226. aggBuilder.Filters(bucketAgg.ID, func(a *es.FiltersAggregation, b es.AggBuilder) {
  227. a.Filters = filters
  228. aggBuilder = b
  229. })
  230. }
  231. return aggBuilder
  232. }
  233. func addGeoHashGridAgg(aggBuilder es.AggBuilder, bucketAgg *BucketAgg) es.AggBuilder {
  234. aggBuilder.GeoHashGrid(bucketAgg.ID, bucketAgg.Field, func(a *es.GeoHashGridAggregation, b es.AggBuilder) {
  235. a.Precision = bucketAgg.Settings.Get("precision").MustInt(3)
  236. aggBuilder = b
  237. })
  238. return aggBuilder
  239. }
// timeSeriesQueryParser converts raw panel JSON query models into typed
// Query values. It is stateless.
type timeSeriesQueryParser struct{}

// newTimeSeriesQueryParser returns a ready-to-use parser.
func newTimeSeriesQueryParser() *timeSeriesQueryParser {
	return &timeSeriesQueryParser{}
}
  244. func (p *timeSeriesQueryParser) parse(tsdbQuery *tsdb.TsdbQuery) ([]*Query, error) {
  245. queries := make([]*Query, 0)
  246. for _, q := range tsdbQuery.Queries {
  247. model := q.Model
  248. timeField, err := model.Get("timeField").String()
  249. if err != nil {
  250. return nil, err
  251. }
  252. rawQuery := model.Get("query").MustString()
  253. bucketAggs, err := p.parseBucketAggs(model)
  254. if err != nil {
  255. return nil, err
  256. }
  257. metrics, err := p.parseMetrics(model)
  258. if err != nil {
  259. return nil, err
  260. }
  261. alias := model.Get("alias").MustString("")
  262. interval := strconv.FormatInt(q.IntervalMs, 10) + "ms"
  263. queries = append(queries, &Query{
  264. TimeField: timeField,
  265. RawQuery: rawQuery,
  266. BucketAggs: bucketAggs,
  267. Metrics: metrics,
  268. Alias: alias,
  269. Interval: interval,
  270. RefID: q.RefId,
  271. })
  272. }
  273. return queries, nil
  274. }
  275. func (p *timeSeriesQueryParser) parseBucketAggs(model *simplejson.Json) ([]*BucketAgg, error) {
  276. var err error
  277. var result []*BucketAgg
  278. for _, t := range model.Get("bucketAggs").MustArray() {
  279. aggJSON := simplejson.NewFromAny(t)
  280. agg := &BucketAgg{}
  281. agg.Type, err = aggJSON.Get("type").String()
  282. if err != nil {
  283. return nil, err
  284. }
  285. agg.ID, err = aggJSON.Get("id").String()
  286. if err != nil {
  287. return nil, err
  288. }
  289. agg.Field = aggJSON.Get("field").MustString()
  290. agg.Settings = simplejson.NewFromAny(aggJSON.Get("settings").MustMap())
  291. result = append(result, agg)
  292. }
  293. return result, nil
  294. }
  295. func (p *timeSeriesQueryParser) parseMetrics(model *simplejson.Json) ([]*MetricAgg, error) {
  296. var err error
  297. var result []*MetricAgg
  298. for _, t := range model.Get("metrics").MustArray() {
  299. metricJSON := simplejson.NewFromAny(t)
  300. metric := &MetricAgg{}
  301. metric.Field = metricJSON.Get("field").MustString()
  302. metric.Hide = metricJSON.Get("hide").MustBool(false)
  303. metric.ID = metricJSON.Get("id").MustString()
  304. metric.PipelineAggregate = metricJSON.Get("pipelineAgg").MustString()
  305. metric.Settings = simplejson.NewFromAny(metricJSON.Get("settings").MustMap())
  306. metric.Meta = simplejson.NewFromAny(metricJSON.Get("meta").MustMap())
  307. metric.Type, err = metricJSON.Get("type").String()
  308. if err != nil {
  309. return nil, err
  310. }
  311. if isPipelineAggWithMultipleBucketPaths(metric.Type) {
  312. metric.PipelineVariables = map[string]string{}
  313. pvArr := metricJSON.Get("pipelineVariables").MustArray()
  314. for _, v := range pvArr {
  315. kv := v.(map[string]interface{})
  316. metric.PipelineVariables[kv["name"].(string)] = kv["pipelineAgg"].(string)
  317. }
  318. }
  319. result = append(result, metric)
  320. }
  321. return result, nil
  322. }