postgres.go

package postgres

import (
	"container/list"
	"context"
	"fmt"
	"math"
	"net/url"
	"strconv"
	"time"

	"github.com/go-xorm/core"
	"github.com/grafana/grafana/pkg/components/null"
	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/models"
	"github.com/grafana/grafana/pkg/tsdb"
)
// PostgresQueryEndpoint handles queries against a PostgreSQL datasource and is
// registered as the "postgres" tsdb query endpoint.
type PostgresQueryEndpoint struct {
	sqlEngine tsdb.SqlEngine
	log       log.Logger
}

func init() {
	tsdb.RegisterTsdbQueryEndpoint("postgres", NewPostgresQueryEndpoint)
}
func NewPostgresQueryEndpoint(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
	endpoint := &PostgresQueryEndpoint{
		log: log.New("tsdb.postgres"),
	}

	endpoint.sqlEngine = &tsdb.DefaultSqlEngine{
		MacroEngine: NewPostgresMacroEngine(),
	}

	cnnstr := generateConnectionString(datasource)
	endpoint.log.Debug("getEngine", "connection", cnnstr)

	if err := endpoint.sqlEngine.InitEngine("postgres", datasource, cnnstr); err != nil {
		return nil, err
	}

	return endpoint, nil
}
// generateConnectionString builds a postgres:// connection URL from the datasource
// settings; the password comes from the decrypted secure JSON data.
func generateConnectionString(datasource *models.DataSource) string {
	password := ""
	for key, value := range datasource.SecureJsonData.Decrypt() {
		if key == "password" {
			password = value
			break
		}
	}

	sslmode := datasource.JsonData.Get("sslmode").MustString("verify-full")

	u := &url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword(datasource.User, password),
		Host:     datasource.Url,
		Path:     datasource.Database,
		RawQuery: "sslmode=" + sslmode,
	}

	return u.String()
}
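
// Illustrative example (not part of the original file): for a datasource configured
// with User "grafana", Url "localhost:5432", Database "grafana" and sslmode "disable"
// (all values assumed here), generateConnectionString would produce a URL such as:
//
//	postgres://grafana:secret@localhost:5432/grafana?sslmode=disable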
// Query runs the tsdb query through the shared SQL engine, using the postgres-specific
// time series and table transformers defined below.
func (e *PostgresQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
	return e.sqlEngine.Query(ctx, dsInfo, tsdbQuery, e.transformToTimeSeries, e.transformToTable)
}
func (e PostgresQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
	columnNames, err := rows.Columns()
	if err != nil {
		return err
	}

	table := &tsdb.Table{
		Columns: make([]tsdb.TableColumn, len(columnNames)),
		Rows:    make([]tsdb.RowValues, 0),
	}

	for i, name := range columnNames {
		table.Columns[i].Text = name
	}

	rowLimit := 1000000
	rowCount := 0
	timeIndex := -1

	// check if there is a column named time
	for i, col := range columnNames {
		switch col {
		case "time":
			timeIndex = i
		}
	}

	for ; rows.Next(); rowCount++ {
		if rowCount > rowLimit {
			return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
		}

		values, err := e.getTypedRowData(rows)
		if err != nil {
			return err
		}

		// convert column named time to unix timestamp to make
		// native datetime postgres types work in annotation queries
		if timeIndex != -1 {
			switch value := values[timeIndex].(type) {
			case time.Time:
				values[timeIndex] = float64(value.UnixNano() / 1e9)
			}
		}

		table.Rows = append(table.Rows, values)
	}

	result.Tables = append(result.Tables, table)
	result.Meta.Set("rowCount", rowCount)
	return nil
}
func (e PostgresQueryEndpoint) getTypedRowData(rows *core.Rows) (tsdb.RowValues, error) {
	types, err := rows.ColumnTypes()
	if err != nil {
		return nil, err
	}

	values := make([]interface{}, len(types))
	valuePtrs := make([]interface{}, len(types))

	for i := 0; i < len(types); i++ {
		valuePtrs[i] = &values[i]
	}

	if err := rows.Scan(valuePtrs...); err != nil {
		return nil, err
	}

	// convert types not handled by lib/pq
	// unhandled types are returned as []byte
	for i := 0; i < len(types); i++ {
		if value, ok := values[i].([]byte); ok {
			switch types[i].DatabaseTypeName() {
			case "NUMERIC":
				if v, err := strconv.ParseFloat(string(value), 64); err == nil {
					values[i] = v
				} else {
					e.log.Debug("Rows", "Error converting numeric to float", value)
				}
			case "UNKNOWN", "CIDR", "INET", "MACADDR":
				// char literals have type UNKNOWN
				values[i] = string(value)
			default:
				e.log.Debug("Rows", "Unknown database type", types[i].DatabaseTypeName(), "value", value)
				values[i] = string(value)
			}
		}
	}

	return values, nil
}
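
// Illustrative example (not part of the original file): transformToTimeSeries below expects
// a mandatory "time" column (native timestamp or unix epoch), an optional "metric" column
// (or the first text column) used as the series name, and one or more numeric value columns.
// Table and column names in this sketch are assumptions for illustration only:
//
//	SELECT
//	  extract(epoch FROM created_at)::bigint / 300 * 300 AS time,
//	  hostname AS metric,
//	  avg(cpu) AS value
//	FROM metrics
//	GROUP BY 1, 2
//	ORDER BY 1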
func (e PostgresQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
	pointsBySeries := make(map[string]*tsdb.TimeSeries)
	seriesByQueryOrder := list.New()

	columnNames, err := rows.Columns()
	if err != nil {
		return err
	}

	columnTypes, err := rows.ColumnTypes()
	if err != nil {
		return err
	}

	rowLimit := 1000000
	rowCount := 0
	timeIndex := -1
	metricIndex := -1

	// check columns of resultset: a column named time is mandatory
	// the first text column is treated as metric name unless a column named metric is present
	for i, col := range columnNames {
		switch col {
		case "time":
			timeIndex = i
		case "metric":
			metricIndex = i
		default:
			if metricIndex == -1 {
				switch columnTypes[i].DatabaseTypeName() {
				case "UNKNOWN", "TEXT", "VARCHAR", "CHAR":
					metricIndex = i
				}
			}
		}
	}

	if timeIndex == -1 {
		return fmt.Errorf("Found no column named time")
	}

	fillMissing := query.Model.Get("fill").MustBool(false)
	var fillInterval float64
	fillValue := null.Float{}
	if fillMissing {
		fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
		if !query.Model.Get("fillNull").MustBool(false) {
			fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
			fillValue.Valid = true
		}
	}

	for rows.Next() {
		var timestamp float64
		var value null.Float
		var metric string

		if rowCount > rowLimit {
			return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
		}

		values, err := e.getTypedRowData(rows)
		if err != nil {
			return err
		}

		switch columnValue := values[timeIndex].(type) {
		case int64:
			timestamp = float64(columnValue * 1000)
		case float64:
			timestamp = columnValue * 1000
		case time.Time:
			timestamp = float64(columnValue.UnixNano() / 1e6)
		default:
			return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
		}

		if metricIndex >= 0 {
			if columnValue, ok := values[metricIndex].(string); ok {
				metric = columnValue
			} else {
				return fmt.Errorf("Column metric must be of type char,varchar or text, got: %T %v", values[metricIndex], values[metricIndex])
			}
		}

		for i, col := range columnNames {
			if i == timeIndex || i == metricIndex {
				continue
			}

			switch columnValue := values[i].(type) {
			case int64:
				value = null.FloatFrom(float64(columnValue))
			case float64:
				value = null.FloatFrom(columnValue)
			case nil:
				value.Valid = false
			default:
				return fmt.Errorf("Value column must have numeric datatype, column: %s type: %T value: %v", col, columnValue, columnValue)
			}

			if metricIndex == -1 {
				metric = col
			}

			series, exist := pointsBySeries[metric]
			if !exist {
				series = &tsdb.TimeSeries{Name: metric}
				pointsBySeries[metric] = series
				seriesByQueryOrder.PushBack(metric)
			}

			if fillMissing {
				var intervalStart float64
				if !exist {
					intervalStart = float64(tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
				} else {
					intervalStart = series.Points[len(series.Points)-1][1].Float64 + fillInterval
				}

				// align interval start
				intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval

				for i := intervalStart; i < timestamp; i += fillInterval {
					series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
					rowCount++
				}
			}

			series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})

			e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
			rowCount++
		}
	}

	for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
		key := elem.Value.(string)
		result.Series = append(result.Series, pointsBySeries[key])

		if fillMissing {
			series := pointsBySeries[key]
			// fill in values from last fetched value till interval end
			intervalStart := series.Points[len(series.Points)-1][1].Float64
			intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)

			// align interval start
			intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
			for i := intervalStart + fillInterval; i < intervalEnd; i += fillInterval {
				series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
				rowCount++
			}
		}
	}

	result.Meta.Set("rowCount", rowCount)
	return nil
}