Lazily retry triggered alerts in case of failure

TwinProduction
2021-01-21 16:14:32 -05:00
parent e9ac115a95
commit 3204a79eb6
4 changed files with 162 additions and 213 deletions

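For context, the core of the trigger-side change in the diff below is the threshold comparison moving from `!=` to `>`: an alert whose send attempt failed right at its failure threshold is now retried on every subsequent failure, because Triggered only becomes true after a successful send. The following is a minimal, runnable sketch of that behavior; the Alert type and shouldSend helper are simplified stand-ins for illustration, not the project's actual types.

package main

import "fmt"

// Hypothetical, simplified stand-in for the real alert type; illustrative only.
type Alert struct {
	Enabled          bool
	Triggered        bool
	FailureThreshold int
}

// shouldSend reports whether an alert should be (re)sent on the nth consecutive failure.
// With the old `!=` check, a send that failed exactly at the threshold was never retried;
// with `>`, the alert is attempted again on every later failure until Triggered is set.
func shouldSend(a Alert, failuresInARow int) bool {
	if !a.Enabled || a.FailureThreshold > failuresInARow {
		return false
	}
	return !a.Triggered
}

func main() {
	a := Alert{Enabled: true, FailureThreshold: 3}
	for n := 3; n <= 5; n++ {
		fmt.Printf("failure #%d -> attempt send? %v\n", n, shouldSend(a, n))
		if n == 4 { // pretend the provider finally succeeded on the 4th failure
			a.Triggered = true
		}
	}
}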

@@ -26,18 +26,18 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
 	service.NumberOfFailuresInARow++
 	for _, alert := range service.Alerts {
 		// If the alert hasn't been triggered, move to the next one
-		if !alert.Enabled || alert.FailureThreshold != service.NumberOfFailuresInARow {
+		if !alert.Enabled || alert.FailureThreshold > service.NumberOfFailuresInARow {
 			continue
 		}
 		if alert.Triggered {
 			if cfg.Debug {
-				log.Printf("[watchdog][handleAlertsToTrigger] Alert with description='%s' has already been TRIGGERED, skipping", alert.Description)
+				log.Printf("[watchdog][handleAlertsToTrigger] Alert for service=%s with description='%s' has already been TRIGGERED, skipping", service.Name, alert.Description)
 			}
 			continue
 		}
 		alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
 		if alertProvider != nil && alertProvider.IsValid() {
-			log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert with description='%s' has been TRIGGERED", alert.Type, alert.Description)
+			log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert for service=%s with description='%s' has been TRIGGERED", alert.Type, service.Name, alert.Description)
 			customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, false)
 			// TODO: retry on error
 			var err error
@@ -47,7 +47,7 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
 				if body, err = customAlertProvider.Send(service.Name, alert.Description, false); err == nil {
 					var response pagerDutyResponse
 					if err = json.Unmarshal(body, &response); err != nil {
-						log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pager duty response: %s", err.Error())
+						log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pagerduty response: %s", err.Error())
 					} else {
 						alert.ResolveKey = response.DedupKey
 					}
@@ -57,7 +57,7 @@ func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *conf
 				_, err = customAlertProvider.Send(service.Name, alert.Description, false)
 			}
 			if err != nil {
-				log.Printf("[watchdog][handleAlertsToTrigger] Ran into error sending an alert: %s", err.Error())
+				log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an alert for service=%s: %s", service.Name, err.Error())
 			} else {
 				alert.Triggered = true
 			}
@@ -73,18 +73,20 @@ func handleAlertsToResolve(service *core.Service, result *core.Result, cfg *conf
 		if !alert.Enabled || !alert.Triggered || alert.SuccessThreshold > service.NumberOfSuccessesInARow {
 			continue
 		}
+		// Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
+		// Further explanation can be found on Alert's Triggered field.
 		alert.Triggered = false
 		if !alert.SendOnResolved {
 			continue
 		}
 		alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
 		if alertProvider != nil && alertProvider.IsValid() {
-			log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert with description='%s' has been RESOLVED", alert.Type, alert.Description)
+			log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert for service=%s with description='%s' has been RESOLVED", alert.Type, service.Name, alert.Description)
 			customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, true)
 			// TODO: retry on error
 			_, err := customAlertProvider.Send(service.Name, alert.Description, true)
 			if err != nil {
-				log.Printf("[watchdog][handleAlertsToResolve] Ran into error sending an alert: %s", err.Error())
+				log.Printf("[watchdog][handleAlertsToResolve] Failed to send an alert for service=%s: %s", service.Name, err.Error())
 			} else {
 				if alert.Type == core.PagerDutyAlert {
 					alert.ResolveKey = ""