Rename Service to Endpoint (#192)

* Add clarifications in comments

* #191: Rename Service to Endpoint
Authored by TwiN on 2021-10-23 16:47:12 -04:00; committed by GitHub
parent 634123d723
commit 6ed93d4b82
99 changed files with 2136 additions and 2006 deletions
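Most of the changed lines follow the same mechanical pattern shown in the file below: core.Service becomes core.Endpoint, and service/serviceAlert identifiers become endpoint/endpointAlert, with no behavioral change. As a rough illustration of what that means for calling code, here is a hedged, self-contained sketch; the stand-in types and the trimmed-down signature are hypothetical, not gatus' actual API:

package main

import "fmt"

// Stand-in for the renamed core type; the real core.Endpoint (formerly core.Service) has many more fields.
type Endpoint struct {
	Name string
}

// Stand-in for core.Result.
type Result struct {
	Success bool
}

// The watchdog entry point keeps its shape; only the type and parameter name change
// (service *core.Service -> endpoint *core.Endpoint). The alerting config and debug flag are omitted here.
func HandleAlerting(endpoint *Endpoint, result *Result) {
	fmt.Printf("endpoint=%s success=%v\n", endpoint.Name, result.Success)
}

func main() {
	endpoint := &Endpoint{Name: "backend-api"} // was: service := &core.Service{...}
	HandleAlerting(endpoint, &Result{Success: true})
}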


@@ -10,93 +10,93 @@ import (
 )
 // HandleAlerting takes care of alerts to resolve and alerts to trigger based on result success or failure
-func HandleAlerting(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
+func HandleAlerting(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
 	if alertingConfig == nil {
 		return
 	}
 	if result.Success {
-		handleAlertsToResolve(service, result, alertingConfig, debug)
+		handleAlertsToResolve(endpoint, result, alertingConfig, debug)
 	} else {
-		handleAlertsToTrigger(service, result, alertingConfig, debug)
+		handleAlertsToTrigger(endpoint, result, alertingConfig, debug)
 	}
 }
-func handleAlertsToTrigger(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
-	service.NumberOfSuccessesInARow = 0
-	service.NumberOfFailuresInARow++
-	for _, serviceAlert := range service.Alerts {
-		// If the serviceAlert hasn't been triggered, move to the next one
-		if !serviceAlert.IsEnabled() || serviceAlert.FailureThreshold > service.NumberOfFailuresInARow {
+func handleAlertsToTrigger(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
+	endpoint.NumberOfSuccessesInARow = 0
+	endpoint.NumberOfFailuresInARow++
+	for _, endpointAlert := range endpoint.Alerts {
+		// If the alert hasn't been triggered, move to the next one
+		if !endpointAlert.IsEnabled() || endpointAlert.FailureThreshold > endpoint.NumberOfFailuresInARow {
 			continue
 		}
-		if serviceAlert.Triggered {
+		if endpointAlert.Triggered {
 			if debug {
-				log.Printf("[watchdog][handleAlertsToTrigger] Alert for service=%s with description='%s' has already been TRIGGERED, skipping", service.Name, serviceAlert.GetDescription())
+				log.Printf("[watchdog][handleAlertsToTrigger] Alert for endpoint=%s with description='%s' has already been TRIGGERED, skipping", endpoint.Name, endpointAlert.GetDescription())
 			}
 			continue
 		}
-		alertProvider := alertingConfig.GetAlertingProviderByAlertType(serviceAlert.Type)
+		alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 		if alertProvider != nil && alertProvider.IsValid() {
-			log.Printf("[watchdog][handleAlertsToTrigger] Sending %s serviceAlert because serviceAlert for service=%s with description='%s' has been TRIGGERED", serviceAlert.Type, service.Name, serviceAlert.GetDescription())
-			customAlertProvider := alertProvider.ToCustomAlertProvider(service, serviceAlert, result, false)
+			log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert for endpoint=%s with description='%s' has been TRIGGERED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
+			customAlertProvider := alertProvider.ToCustomAlertProvider(endpoint, endpointAlert, result, false)
 			// TODO: retry on error
 			var err error
 			// We need to extract the DedupKey from PagerDuty's response
-			if serviceAlert.Type == alert.TypePagerDuty {
+			if endpointAlert.Type == alert.TypePagerDuty {
 				var body []byte
-				if body, err = customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), false); err == nil {
+				if body, err = customAlertProvider.Send(endpoint.Name, endpointAlert.GetDescription(), false); err == nil {
 					var response pagerDutyResponse
 					if err = json.Unmarshal(body, &response); err != nil {
 						log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pagerduty response: %s", err.Error())
 					} else {
-						serviceAlert.ResolveKey = response.DedupKey
+						endpointAlert.ResolveKey = response.DedupKey
 					}
 				}
 			} else {
-				// All other serviceAlert types don't need to extract anything from the body, so we can just send the request right away
-				_, err = customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), false)
+				// All other alert types don't need to extract anything from the body, so we can just send the request right away
+				_, err = customAlertProvider.Send(endpoint.Name, endpointAlert.GetDescription(), false)
 			}
 			if err != nil {
-				log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an serviceAlert for service=%s: %s", service.Name, err.Error())
+				log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
 			} else {
-				serviceAlert.Triggered = true
+				endpointAlert.Triggered = true
 			}
 		} else {
-			log.Printf("[watchdog][handleAlertsToResolve] Not sending serviceAlert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", serviceAlert.Type)
+			log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", endpointAlert.Type)
 		}
 	}
 }
-func handleAlertsToResolve(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
-	service.NumberOfSuccessesInARow++
-	for _, serviceAlert := range service.Alerts {
-		if !serviceAlert.IsEnabled() || !serviceAlert.Triggered || serviceAlert.SuccessThreshold > service.NumberOfSuccessesInARow {
+func handleAlertsToResolve(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
+	endpoint.NumberOfSuccessesInARow++
+	for _, endpointAlert := range endpoint.Alerts {
+		if !endpointAlert.IsEnabled() || !endpointAlert.Triggered || endpointAlert.SuccessThreshold > endpoint.NumberOfSuccessesInARow {
 			continue
 		}
-		// Even if the serviceAlert provider returns an error, we still set the serviceAlert's Triggered variable to false.
+		// Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
 		// Further explanation can be found on Alert's Triggered field.
-		serviceAlert.Triggered = false
-		if !serviceAlert.IsSendingOnResolved() {
+		endpointAlert.Triggered = false
+		if !endpointAlert.IsSendingOnResolved() {
 			continue
 		}
-		alertProvider := alertingConfig.GetAlertingProviderByAlertType(serviceAlert.Type)
+		alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 		if alertProvider != nil && alertProvider.IsValid() {
-			log.Printf("[watchdog][handleAlertsToResolve] Sending %s serviceAlert because serviceAlert for service=%s with description='%s' has been RESOLVED", serviceAlert.Type, service.Name, serviceAlert.GetDescription())
-			customAlertProvider := alertProvider.ToCustomAlertProvider(service, serviceAlert, result, true)
+			log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
+			customAlertProvider := alertProvider.ToCustomAlertProvider(endpoint, endpointAlert, result, true)
 			// TODO: retry on error
-			_, err := customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), true)
+			_, err := customAlertProvider.Send(endpoint.Name, endpointAlert.GetDescription(), true)
 			if err != nil {
-				log.Printf("[watchdog][handleAlertsToResolve] Failed to send an serviceAlert for service=%s: %s", service.Name, err.Error())
+				log.Printf("[watchdog][handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
 			} else {
-				if serviceAlert.Type == alert.TypePagerDuty {
-					serviceAlert.ResolveKey = ""
+				if endpointAlert.Type == alert.TypePagerDuty {
+					endpointAlert.ResolveKey = ""
 				}
 			}
 		} else {
-			log.Printf("[watchdog][handleAlertsToResolve] Not sending serviceAlert of type=%s despite being RESOLVED, because the provider wasn't configured properly", serviceAlert.Type)
+			log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", endpointAlert.Type)
 		}
 	}
-	service.NumberOfFailuresInARow = 0
+	endpoint.NumberOfFailuresInARow = 0
 }
 type pagerDutyResponse struct {
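Rename aside, the hunk above is the heart of the alerting loop: a failed result resets the success streak and triggers each alert once its FailureThreshold is reached, while a successful result resolves previously triggered alerts once their SuccessThreshold is reached and then clears the failure streak. The following self-contained sketch mimics that counter-driven behavior; the types and the record helper are simplified stand-ins (enabled checks, provider dispatch, and PagerDuty dedup-key handling are left out), not gatus' actual API:

package main

import "fmt"

// Simplified stand-ins for core.Endpoint and its alerts.
type Alert struct {
	FailureThreshold int
	SuccessThreshold int
	Triggered        bool
}

type Endpoint struct {
	Name                    string
	NumberOfFailuresInARow  int
	NumberOfSuccessesInARow int
	Alerts                  []*Alert
}

// record applies the same bookkeeping as handleAlertsToTrigger/handleAlertsToResolve above,
// minus the provider calls: thresholds compare against the consecutive failure/success counters.
func record(endpoint *Endpoint, success bool) {
	if success {
		endpoint.NumberOfSuccessesInARow++
		for _, a := range endpoint.Alerts {
			if a.Triggered && a.SuccessThreshold <= endpoint.NumberOfSuccessesInARow {
				a.Triggered = false
				fmt.Printf("RESOLVED alert for endpoint=%s\n", endpoint.Name)
			}
		}
		endpoint.NumberOfFailuresInARow = 0
	} else {
		endpoint.NumberOfSuccessesInARow = 0
		endpoint.NumberOfFailuresInARow++
		for _, a := range endpoint.Alerts {
			if !a.Triggered && a.FailureThreshold <= endpoint.NumberOfFailuresInARow {
				a.Triggered = true
				fmt.Printf("TRIGGERED alert for endpoint=%s\n", endpoint.Name)
			}
		}
	}
}

func main() {
	endpoint := &Endpoint{Name: "backend-api", Alerts: []*Alert{{FailureThreshold: 3, SuccessThreshold: 2}}}
	for _, success := range []bool{false, false, false, true, true} {
		record(endpoint, success) // triggers on the 3rd consecutive failure, resolves on the 2nd consecutive success
	}
}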