engine.go

package alerting

import (
	"fmt"
	"time"

	"github.com/benbjohnson/clock"

	"github.com/grafana/grafana/pkg/log"
	"github.com/grafana/grafana/pkg/services/alerting/alertstates"
)
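
// Engine schedules alert rule evaluations, dispatches them to the alerting
// handler, and processes the results, including retries and state updates.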
type Engine struct {
	execQueue       chan *AlertJob
	resultQueue     chan *AlertResult
	clock           clock.Clock
	ticker          *Ticker
	scheduler       Scheduler
	handler         AlertingHandler
	ruleReader      RuleReader
	log             log.Logger
	responseHandler ResultHandler
	alertJobTimeout time.Duration
}
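
// NewEngine returns an Engine with default queues, scheduler, handler,
// rule reader, result handler, and a 5 second per-job execution timeout.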
func NewEngine() *Engine {
	e := &Engine{
		ticker:          NewTicker(time.Now(), time.Second*0, clock.New()),
		execQueue:       make(chan *AlertJob, 1000),
		resultQueue:     make(chan *AlertResult, 1000),
		scheduler:       NewScheduler(),
		handler:         NewHandler(),
		ruleReader:      NewRuleReader(),
		log:             log.New("alerting.engine"),
		responseHandler: NewResultHandler(),
		alertJobTimeout: time.Second * 5,
	}

	return e
}
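
// Start launches the ticker, dispatcher, and result handler goroutines.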
func (e *Engine) Start() {
	e.log.Info("Starting Alerting Engine")

	go e.alertingTicker()
	go e.execDispatch()
	go e.resultHandler()
}
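
// Stop closes the work queues, which terminates the dispatcher and
// result handler loops.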
func (e *Engine) Stop() {
	close(e.execQueue)
	close(e.resultQueue)
}
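
// alertingTicker drives the scheduler: it refreshes the rule set every
// tenth tick and, on every tick, lets the scheduler emit due jobs onto
// the exec queue.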
func (e *Engine) alertingTicker() {
	defer func() {
		if err := recover(); err != nil {
			e.log.Error("Scheduler Panic: stopping alertingTicker", "error", err, "stack", log.Stack(1))
		}
	}()

	tickIndex := 0

	for {
		select {
		case tick := <-e.ticker.C:
			// TEMP SOLUTION: update rules every tenth tick
			if tickIndex%10 == 0 {
				e.scheduler.Update(e.ruleReader.Fetch())
			}

			e.scheduler.Tick(tick, e.execQueue)
			tickIndex++
		}
	}
}
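
// execDispatch consumes jobs from the exec queue and executes them one at a time.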
func (e *Engine) execDispatch() {
	defer func() {
		if err := recover(); err != nil {
			e.log.Error("Scheduler Panic: stopping executor", "error", err, "stack", log.Stack(1))
		}
	}()

	for job := range e.execQueue {
		log.Trace("Alerting: engine:execDispatch() starting job %s", job.Rule.Name)
		job.Running = true
		e.executeJob(job)
	}
}
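
// executeJob runs a single job via the handler, enforcing alertJobTimeout.
// On timeout a Pending result with a Timeout error is queued; otherwise the
// handler's result is forwarded to the result queue.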
func (e *Engine) executeJob(job *AlertJob) {
	startTime := time.Now()

	resultChan := make(chan *AlertResult, 1)
	go e.handler.Execute(job, resultChan)

	select {
	case <-time.After(e.alertJobTimeout):
		e.resultQueue <- &AlertResult{
			State:     alertstates.Pending,
			Error:     fmt.Errorf("Timeout"),
			AlertJob:  job,
			StartTime: startTime,
			EndTime:   time.Now(),
		}
		close(resultChan)
		e.log.Debug("Job Execution timeout", "alertRuleId", job.Rule.Id)
	case result := <-resultChan:
		// Compute the elapsed time in milliseconds from the full timestamps,
		// not from Nanosecond(), which only returns the sub-second offset.
		duration := result.EndTime.Sub(result.StartTime).Seconds() * 1000
		e.log.Debug("Job Execution done", "timeTakenMs", duration, "ruleId", job.Rule.Id)
		e.resultQueue <- result
	}
}
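
// resultHandler consumes results, re-queueing failed jobs until their retries
// are exhausted, at which point the result is marked Critical and handed to
// the response handler.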
func (e *Engine) resultHandler() {
	defer func() {
		if err := recover(); err != nil {
			e.log.Error("Engine Panic, stopping resultHandler", "error", err, "stack", log.Stack(1))
		}
	}()

	for result := range e.resultQueue {
		e.log.Debug("Alert Rule Result", "ruleId", result.AlertJob.Rule.Id, "state", result.State, "retry", result.AlertJob.RetryCount)
		result.AlertJob.Running = false

		if result.Error != nil {
			result.AlertJob.IncRetry()

			if result.AlertJob.Retryable() {
				e.log.Error("Alert Rule Result Error", "ruleId", result.AlertJob.Rule.Id, "error", result.Error, "retry", result.AlertJob.RetryCount)
				e.execQueue <- result.AlertJob
			} else {
				e.log.Error("Alert Rule Result Error After Max Retries", "ruleId", result.AlertJob.Rule.Id, "error", result.Error, "retry", result.AlertJob.RetryCount)

				result.State = alertstates.Critical
				result.Description = fmt.Sprintf("Failed to run check after %d retries, Error: %v", maxAlertExecutionRetries, result.Error)
				e.responseHandler.Handle(result)
			}
		} else {
			result.AlertJob.ResetRetry()
			e.responseHandler.Handle(result)
		}
	}
}