// postgres.go
  1. package postgres
  2. import (
  3. "container/list"
  4. "context"
  5. "fmt"
  6. "math"
  7. "net/url"
  8. "strconv"
  9. "github.com/go-xorm/core"
  10. "github.com/grafana/grafana/pkg/components/null"
  11. "github.com/grafana/grafana/pkg/log"
  12. "github.com/grafana/grafana/pkg/models"
  13. "github.com/grafana/grafana/pkg/tsdb"
  14. )
// PostgresQueryEndpoint executes tsdb queries against a PostgreSQL
// datasource. Connection management and query dispatch are delegated to
// the embedded tsdb.SqlEngine; this type supplies the postgres-specific
// row transformers.
type PostgresQueryEndpoint struct {
	sqlEngine tsdb.SqlEngine // shared engine handling macros and connections
	log       log.Logger     // scoped logger ("tsdb.postgres")
}
// init registers this endpoint under the "postgres" datasource type so the
// tsdb dispatcher can construct it for matching datasources.
func init() {
	tsdb.RegisterTsdbQueryEndpoint("postgres", NewPostgresQueryEndpoint)
}
  22. func NewPostgresQueryEndpoint(datasource *models.DataSource) (tsdb.TsdbQueryEndpoint, error) {
  23. endpoint := &PostgresQueryEndpoint{
  24. log: log.New("tsdb.postgres"),
  25. }
  26. endpoint.sqlEngine = &tsdb.DefaultSqlEngine{
  27. MacroEngine: NewPostgresMacroEngine(),
  28. }
  29. cnnstr := generateConnectionString(datasource)
  30. endpoint.log.Debug("getEngine", "connection", cnnstr)
  31. if err := endpoint.sqlEngine.InitEngine("postgres", datasource, cnnstr); err != nil {
  32. return nil, err
  33. }
  34. return endpoint, nil
  35. }
  36. func generateConnectionString(datasource *models.DataSource) string {
  37. password := ""
  38. for key, value := range datasource.SecureJsonData.Decrypt() {
  39. if key == "password" {
  40. password = value
  41. break
  42. }
  43. }
  44. sslmode := datasource.JsonData.Get("sslmode").MustString("verify-full")
  45. u := &url.URL{Scheme: "postgres",
  46. User: url.UserPassword(datasource.User, password),
  47. Host: datasource.Url, Path: datasource.Database,
  48. RawQuery: "sslmode=" + url.QueryEscape(sslmode)}
  49. return u.String()
  50. }
// Query executes the given tsdb query by delegating to the shared SQL
// engine, supplying the postgres-specific transformers for time series
// and table results.
func (e *PostgresQueryEndpoint) Query(ctx context.Context, dsInfo *models.DataSource, tsdbQuery *tsdb.TsdbQuery) (*tsdb.Response, error) {
	return e.sqlEngine.Query(ctx, dsInfo, tsdbQuery, e.transformToTimeSeries, e.transformToTable)
}
  54. func (e PostgresQueryEndpoint) transformToTable(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
  55. columnNames, err := rows.Columns()
  56. if err != nil {
  57. return err
  58. }
  59. table := &tsdb.Table{
  60. Columns: make([]tsdb.TableColumn, len(columnNames)),
  61. Rows: make([]tsdb.RowValues, 0),
  62. }
  63. for i, name := range columnNames {
  64. table.Columns[i].Text = name
  65. }
  66. rowLimit := 1000000
  67. rowCount := 0
  68. timeIndex := -1
  69. // check if there is a column named time
  70. for i, col := range columnNames {
  71. switch col {
  72. case "time":
  73. timeIndex = i
  74. }
  75. }
  76. for ; rows.Next(); rowCount++ {
  77. if rowCount > rowLimit {
  78. return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
  79. }
  80. values, err := e.getTypedRowData(rows)
  81. if err != nil {
  82. return err
  83. }
  84. // converts column named time to unix timestamp in milliseconds to make
  85. // native postgres datetime types and epoch dates work in
  86. // annotation and table queries.
  87. tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)
  88. table.Rows = append(table.Rows, values)
  89. }
  90. result.Tables = append(result.Tables, table)
  91. result.Meta.Set("rowCount", rowCount)
  92. return nil
  93. }
  94. func (e PostgresQueryEndpoint) getTypedRowData(rows *core.Rows) (tsdb.RowValues, error) {
  95. types, err := rows.ColumnTypes()
  96. if err != nil {
  97. return nil, err
  98. }
  99. values := make([]interface{}, len(types))
  100. valuePtrs := make([]interface{}, len(types))
  101. for i := 0; i < len(types); i++ {
  102. valuePtrs[i] = &values[i]
  103. }
  104. if err := rows.Scan(valuePtrs...); err != nil {
  105. return nil, err
  106. }
  107. // convert types not handled by lib/pq
  108. // unhandled types are returned as []byte
  109. for i := 0; i < len(types); i++ {
  110. if value, ok := values[i].([]byte); ok {
  111. switch types[i].DatabaseTypeName() {
  112. case "NUMERIC":
  113. if v, err := strconv.ParseFloat(string(value), 64); err == nil {
  114. values[i] = v
  115. } else {
  116. e.log.Debug("Rows", "Error converting numeric to float", value)
  117. }
  118. case "UNKNOWN", "CIDR", "INET", "MACADDR":
  119. // char literals have type UNKNOWN
  120. values[i] = string(value)
  121. default:
  122. e.log.Debug("Rows", "Unknown database type", types[i].DatabaseTypeName(), "value", value)
  123. values[i] = string(value)
  124. }
  125. }
  126. }
  127. return values, nil
  128. }
  129. func (e PostgresQueryEndpoint) transformToTimeSeries(query *tsdb.Query, rows *core.Rows, result *tsdb.QueryResult, tsdbQuery *tsdb.TsdbQuery) error {
  130. pointsBySeries := make(map[string]*tsdb.TimeSeries)
  131. seriesByQueryOrder := list.New()
  132. columnNames, err := rows.Columns()
  133. if err != nil {
  134. return err
  135. }
  136. columnTypes, err := rows.ColumnTypes()
  137. if err != nil {
  138. return err
  139. }
  140. rowLimit := 1000000
  141. rowCount := 0
  142. timeIndex := -1
  143. metricIndex := -1
  144. // check columns of resultset: a column named time is mandatory
  145. // the first text column is treated as metric name unless a column named metric is present
  146. for i, col := range columnNames {
  147. switch col {
  148. case "time":
  149. timeIndex = i
  150. case "metric":
  151. metricIndex = i
  152. default:
  153. if metricIndex == -1 {
  154. switch columnTypes[i].DatabaseTypeName() {
  155. case "UNKNOWN", "TEXT", "VARCHAR", "CHAR":
  156. metricIndex = i
  157. }
  158. }
  159. }
  160. }
  161. if timeIndex == -1 {
  162. return fmt.Errorf("Found no column named time")
  163. }
  164. fillMissing := query.Model.Get("fill").MustBool(false)
  165. var fillInterval float64
  166. fillValue := null.Float{}
  167. if fillMissing {
  168. fillInterval = query.Model.Get("fillInterval").MustFloat64() * 1000
  169. if !query.Model.Get("fillNull").MustBool(false) {
  170. fillValue.Float64 = query.Model.Get("fillValue").MustFloat64()
  171. fillValue.Valid = true
  172. }
  173. }
  174. for rows.Next() {
  175. var timestamp float64
  176. var value null.Float
  177. var metric string
  178. if rowCount > rowLimit {
  179. return fmt.Errorf("PostgreSQL query row limit exceeded, limit %d", rowLimit)
  180. }
  181. values, err := e.getTypedRowData(rows)
  182. if err != nil {
  183. return err
  184. }
  185. // converts column named time to unix timestamp in milliseconds to make
  186. // native mysql datetime types and epoch dates work in
  187. // annotation and table queries.
  188. tsdb.ConvertSqlTimeColumnToEpochMs(values, timeIndex)
  189. switch columnValue := values[timeIndex].(type) {
  190. case int64:
  191. timestamp = float64(columnValue)
  192. case float64:
  193. timestamp = columnValue
  194. default:
  195. return fmt.Errorf("Invalid type for column time, must be of type timestamp or unix timestamp, got: %T %v", columnValue, columnValue)
  196. }
  197. if metricIndex >= 0 {
  198. if columnValue, ok := values[metricIndex].(string); ok {
  199. metric = columnValue
  200. } else {
  201. return fmt.Errorf("Column metric must be of type char,varchar or text, got: %T %v", values[metricIndex], values[metricIndex])
  202. }
  203. }
  204. for i, col := range columnNames {
  205. if i == timeIndex || i == metricIndex {
  206. continue
  207. }
  208. if value, err = tsdb.ConvertSqlValueColumnToFloat(col, values[i]); err != nil {
  209. return err
  210. }
  211. if metricIndex == -1 {
  212. metric = col
  213. }
  214. series, exist := pointsBySeries[metric]
  215. if !exist {
  216. series = &tsdb.TimeSeries{Name: metric}
  217. pointsBySeries[metric] = series
  218. seriesByQueryOrder.PushBack(metric)
  219. }
  220. if fillMissing {
  221. var intervalStart float64
  222. if !exist {
  223. intervalStart = float64(tsdbQuery.TimeRange.MustGetFrom().UnixNano() / 1e6)
  224. } else {
  225. intervalStart = series.Points[len(series.Points)-1][1].Float64 + fillInterval
  226. }
  227. // align interval start
  228. intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
  229. for i := intervalStart; i < timestamp; i += fillInterval {
  230. series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
  231. rowCount++
  232. }
  233. }
  234. series.Points = append(series.Points, tsdb.TimePoint{value, null.FloatFrom(timestamp)})
  235. e.log.Debug("Rows", "metric", metric, "time", timestamp, "value", value)
  236. rowCount++
  237. }
  238. }
  239. for elem := seriesByQueryOrder.Front(); elem != nil; elem = elem.Next() {
  240. key := elem.Value.(string)
  241. result.Series = append(result.Series, pointsBySeries[key])
  242. if fillMissing {
  243. series := pointsBySeries[key]
  244. // fill in values from last fetched value till interval end
  245. intervalStart := series.Points[len(series.Points)-1][1].Float64
  246. intervalEnd := float64(tsdbQuery.TimeRange.MustGetTo().UnixNano() / 1e6)
  247. // align interval start
  248. intervalStart = math.Floor(intervalStart/fillInterval) * fillInterval
  249. for i := intervalStart + fillInterval; i < intervalEnd; i += fillInterval {
  250. series.Points = append(series.Points, tsdb.TimePoint{fillValue, null.FloatFrom(i)})
  251. rowCount++
  252. }
  253. }
  254. }
  255. result.Meta.Set("rowCount", rowCount)
  256. return nil
  257. }