refactor: Break core package into multiple packages under config/endpoint (#759)
* refactor: Partially break core package into dns, result and ssh packages
* refactor: Move core package to config/endpoint
* refactor: Fix warning about overlapping imported package name with endpoint variable
* refactor: Rename EndpointStatus to Status
* refactor: Merge result pkg back into endpoint pkg, because it makes more sense
* refactor: Rename parameter r to result in Condition.evaluate
* refactor: Rename parameter r to result
* refactor: Revert accidental change to endpoint.TypeDNS
* refactor: Rename parameter r to result
* refactor: Merge util package into endpoint package
* refactor: Rename parameter r to result
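The "overlapping imported package name" warning mentioned above is what drives the endpoint-to-ep parameter rename throughout this diff: once the package lives at config/endpoint and is imported as endpoint, a parameter with the same name shadows the package for the entire function body. A minimal, self-contained sketch of the same pitfall, using the standard library's fmt package and a hypothetical Formatter type rather than Gatus code:

package main

import "fmt"

type Formatter struct{ Prefix string }

// Had this parameter been named "fmt", it would shadow the imported package
// for the whole body, and fmt.Println below would fail to compile
// ("type *Formatter has no field or method Println"). A short, distinct name
// keeps the package reachable, which is what renaming "endpoint" to "ep"
// accomplishes in the hunks below.
func print(f *Formatter, msg string) {
	fmt.Println(f.Prefix + msg)
}

func main() {
	print(&Formatter{Prefix: "> "}, "hello")
}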
@@ -6,48 +6,48 @@ import (
 	"os"
 
 	"github.com/TwiN/gatus/v5/alerting"
-	"github.com/TwiN/gatus/v5/core"
+	"github.com/TwiN/gatus/v5/config/endpoint"
 )
 
 // HandleAlerting takes care of alerts to resolve and alerts to trigger based on result success or failure
-func HandleAlerting(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
+func HandleAlerting(ep *endpoint.Endpoint, result *endpoint.Result, alertingConfig *alerting.Config, debug bool) {
 	if alertingConfig == nil {
 		return
 	}
 	if result.Success {
-		handleAlertsToResolve(endpoint, result, alertingConfig, debug)
+		handleAlertsToResolve(ep, result, alertingConfig, debug)
 	} else {
-		handleAlertsToTrigger(endpoint, result, alertingConfig, debug)
+		handleAlertsToTrigger(ep, result, alertingConfig, debug)
 	}
 }
 
-func handleAlertsToTrigger(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
-	endpoint.NumberOfSuccessesInARow = 0
-	endpoint.NumberOfFailuresInARow++
-	for _, endpointAlert := range endpoint.Alerts {
+func handleAlertsToTrigger(ep *endpoint.Endpoint, result *endpoint.Result, alertingConfig *alerting.Config, debug bool) {
+	ep.NumberOfSuccessesInARow = 0
+	ep.NumberOfFailuresInARow++
+	for _, endpointAlert := range ep.Alerts {
 		// If the alert hasn't been triggered, move to the next one
-		if !endpointAlert.IsEnabled() || endpointAlert.FailureThreshold > endpoint.NumberOfFailuresInARow {
+		if !endpointAlert.IsEnabled() || endpointAlert.FailureThreshold > ep.NumberOfFailuresInARow {
 			continue
 		}
 		if endpointAlert.Triggered {
 			if debug {
-				log.Printf("[watchdog.handleAlertsToTrigger] Alert for endpoint=%s with description='%s' has already been TRIGGERED, skipping", endpoint.Name, endpointAlert.GetDescription())
+				log.Printf("[watchdog.handleAlertsToTrigger] Alert for endpoint=%s with description='%s' has already been TRIGGERED, skipping", ep.Name, endpointAlert.GetDescription())
 			}
 			continue
 		}
 		alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 		if alertProvider != nil {
-			log.Printf("[watchdog.handleAlertsToTrigger] Sending %s alert because alert for endpoint=%s with description='%s' has been TRIGGERED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
+			log.Printf("[watchdog.handleAlertsToTrigger] Sending %s alert because alert for endpoint=%s with description='%s' has been TRIGGERED", endpointAlert.Type, ep.Name, endpointAlert.GetDescription())
 			var err error
 			if os.Getenv("MOCK_ALERT_PROVIDER") == "true" {
 				if os.Getenv("MOCK_ALERT_PROVIDER_ERROR") == "true" {
 					err = errors.New("error")
 				}
 			} else {
-				err = alertProvider.Send(endpoint, endpointAlert, result, false)
+				err = alertProvider.Send(ep, endpointAlert, result, false)
 			}
 			if err != nil {
-				log.Printf("[watchdog.handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
+				log.Printf("[watchdog.handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", ep.Name, err.Error())
 			} else {
 				endpointAlert.Triggered = true
 			}
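Note that the hunk above changes no behavior; it only swaps the core package for endpoint and renames the shadowing endpoint parameter to ep. The gating logic it preserves can be modeled with the following standalone sketch (hypothetical types, not the Gatus ones): an alert fires only once the failure streak reaches FailureThreshold, and the Triggered flag keeps it from being re-sent on every later failure.

package main

import "fmt"

type alert struct {
	FailureThreshold int
	Triggered        bool
}

func main() {
	a := &alert{FailureThreshold: 3}
	failuresInARow := 0
	for i := 0; i < 5; i++ {
		failuresInARow++ // each failed health check extends the streak
		if a.FailureThreshold > failuresInARow {
			continue // threshold not reached yet
		}
		if a.Triggered {
			continue // already triggered, don't re-send
		}
		fmt.Printf("alert sent after %d consecutive failures\n", failuresInARow)
		a.Triggered = true
	}
}

Despite five consecutive failures, this prints exactly one line, after the third failure: the same reason the two continue guards in handleAlertsToTrigger sit before the provider call.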
@@ -57,10 +57,10 @@ func handleAlertsToTrigger(endpoint *core.Endpoint, result *core.Result, alertin
 	}
 }
 
-func handleAlertsToResolve(endpoint *core.Endpoint, result *core.Result, alertingConfig *alerting.Config, debug bool) {
-	endpoint.NumberOfSuccessesInARow++
-	for _, endpointAlert := range endpoint.Alerts {
-		if !endpointAlert.IsEnabled() || !endpointAlert.Triggered || endpointAlert.SuccessThreshold > endpoint.NumberOfSuccessesInARow {
+func handleAlertsToResolve(ep *endpoint.Endpoint, result *endpoint.Result, alertingConfig *alerting.Config, debug bool) {
+	ep.NumberOfSuccessesInARow++
+	for _, endpointAlert := range ep.Alerts {
+		if !endpointAlert.IsEnabled() || !endpointAlert.Triggered || endpointAlert.SuccessThreshold > ep.NumberOfSuccessesInARow {
 			continue
 		}
 		// Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
@@ -71,14 +71,14 @@ func handleAlertsToResolve(endpoint *core.Endpoint, result *core.Result, alertin
 		}
 		alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 		if alertProvider != nil {
-			log.Printf("[watchdog.handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
-			err := alertProvider.Send(endpoint, endpointAlert, result, true)
+			log.Printf("[watchdog.handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, ep.Name, endpointAlert.GetDescription())
+			err := alertProvider.Send(ep, endpointAlert, result, true)
 			if err != nil {
-				log.Printf("[watchdog.handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
+				log.Printf("[watchdog.handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", ep.Name, err.Error())
 			}
 		} else {
 			log.Printf("[watchdog.handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", endpointAlert.Type)
 		}
 	}
-	endpoint.NumberOfFailuresInARow = 0
+	ep.NumberOfFailuresInARow = 0
 }
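The resolve path is symmetric: once a previously triggered alert has seen SuccessThreshold consecutive successes it is resolved (and, per the comment in the second hunk, Triggered is cleared even if the provider errors), after which the failure streak is reset. A hypothetical mirror of that flow, again with stand-in types rather than the Gatus ones:

package main

import "fmt"

type alert struct {
	SuccessThreshold int
	Triggered        bool
}

func main() {
	a := &alert{SuccessThreshold: 2, Triggered: true} // alert left over from an outage
	successesInARow := 0
	failuresInARow := 5 // pretend streak accumulated during the outage
	for i := 0; i < 3; i++ {
		successesInARow++ // each successful health check extends the streak
		if !a.Triggered || a.SuccessThreshold > successesInARow {
			continue // nothing to resolve, or not enough successes yet
		}
		fmt.Printf("alert resolved after %d consecutive successes\n", successesInARow)
		a.Triggered = false // in the diff above, this happens even if the provider Send fails
	}
	failuresInARow = 0 // a successful result always resets the failure streak
	fmt.Println("failures in a row:", failuresInARow)
}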