#29: Automatically reload on configuration file update

TwinProduction
2021-05-18 22:29:15 -04:00
parent 40dc1cc270
commit db23bd9073
37 changed files with 616 additions and 496 deletions

View File

@@ -4,96 +4,96 @@ import (
"encoding/json"
"log"
"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/alerting"
"github.com/TwinProduction/gatus/alerting/alert"
"github.com/TwinProduction/gatus/core"
)
// HandleAlerting takes care of alerts to resolve and alerts to trigger based on result success or failure
-func HandleAlerting(service *core.Service, result *core.Result) {
-cfg := config.Get()
-if cfg.Alerting == nil {
+func HandleAlerting(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
+if alertingConfig == nil {
return
}
if result.Success {
-handleAlertsToResolve(service, result, cfg)
+handleAlertsToResolve(service, result, alertingConfig, debug)
} else {
-handleAlertsToTrigger(service, result, cfg)
+handleAlertsToTrigger(service, result, alertingConfig, debug)
}
}
-func handleAlertsToTrigger(service *core.Service, result *core.Result, cfg *config.Config) {
+func handleAlertsToTrigger(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
service.NumberOfSuccessesInARow = 0
service.NumberOfFailuresInARow++
-for _, alert := range service.Alerts {
-// If the alert hasn't been triggered, move to the next one
-if !alert.IsEnabled() || alert.FailureThreshold > service.NumberOfFailuresInARow {
+for _, serviceAlert := range service.Alerts {
+// If the serviceAlert hasn't been triggered, move to the next one
+if !serviceAlert.IsEnabled() || serviceAlert.FailureThreshold > service.NumberOfFailuresInARow {
continue
}
-if alert.Triggered {
-if cfg.Debug {
-log.Printf("[watchdog][handleAlertsToTrigger] Alert for service=%s with description='%s' has already been TRIGGERED, skipping", service.Name, alert.GetDescription())
+if serviceAlert.Triggered {
+if debug {
+log.Printf("[watchdog][handleAlertsToTrigger] Alert for service=%s with description='%s' has already been TRIGGERED, skipping", service.Name, serviceAlert.GetDescription())
}
continue
}
-alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
+alertProvider := alertingConfig.GetAlertingProviderByAlertType(serviceAlert.Type)
if alertProvider != nil && alertProvider.IsValid() {
log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert for service=%s with description='%s' has been TRIGGERED", alert.Type, service.Name, alert.GetDescription())
customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, false)
log.Printf("[watchdog][handleAlertsToTrigger] Sending %s serviceAlert because serviceAlert for service=%s with description='%s' has been TRIGGERED", serviceAlert.Type, service.Name, serviceAlert.GetDescription())
customAlertProvider := alertProvider.ToCustomAlertProvider(service, serviceAlert, result, false)
// TODO: retry on error
var err error
// We need to extract the DedupKey from PagerDuty's response
-if alert.Type == core.PagerDutyAlert {
+if serviceAlert.Type == alert.TypePagerDuty {
var body []byte
-if body, err = customAlertProvider.Send(service.Name, alert.GetDescription(), false); err == nil {
+if body, err = customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), false); err == nil {
var response pagerDutyResponse
if err = json.Unmarshal(body, &response); err != nil {
log.Printf("[watchdog][handleAlertsToTrigger] Ran into error unmarshaling pagerduty response: %s", err.Error())
} else {
-alert.ResolveKey = response.DedupKey
+serviceAlert.ResolveKey = response.DedupKey
}
}
} else {
-// All other alert types don't need to extract anything from the body, so we can just send the request right away
-_, err = customAlertProvider.Send(service.Name, alert.GetDescription(), false)
+// All other serviceAlert types don't need to extract anything from the body, so we can just send the request right away
+_, err = customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), false)
}
if err != nil {
log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an alert for service=%s: %s", service.Name, err.Error())
log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an serviceAlert for service=%s: %s", service.Name, err.Error())
} else {
-alert.Triggered = true
+serviceAlert.Triggered = true
}
} else {
log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", alert.Type)
log.Printf("[watchdog][handleAlertsToResolve] Not sending serviceAlert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", serviceAlert.Type)
}
}
}
-func handleAlertsToResolve(service *core.Service, result *core.Result, cfg *config.Config) {
+func handleAlertsToResolve(service *core.Service, result *core.Result, alertingConfig *alerting.Config, debug bool) {
service.NumberOfSuccessesInARow++
-for _, alert := range service.Alerts {
-if !alert.IsEnabled() || !alert.Triggered || alert.SuccessThreshold > service.NumberOfSuccessesInARow {
+for _, serviceAlert := range service.Alerts {
+if !serviceAlert.IsEnabled() || !serviceAlert.Triggered || serviceAlert.SuccessThreshold > service.NumberOfSuccessesInARow {
continue
}
-// Even if the alert provider returns an error, we still set the alert's Triggered variable to false.
+// Even if the serviceAlert provider returns an error, we still set the serviceAlert's Triggered variable to false.
// Further explanation can be found on Alert's Triggered field.
-alert.Triggered = false
-if !alert.IsSendingOnResolved() {
+serviceAlert.Triggered = false
+if !serviceAlert.IsSendingOnResolved() {
continue
}
-alertProvider := config.GetAlertingProviderByAlertType(cfg, alert.Type)
+alertProvider := alertingConfig.GetAlertingProviderByAlertType(serviceAlert.Type)
if alertProvider != nil && alertProvider.IsValid() {
log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert for service=%s with description='%s' has been RESOLVED", alert.Type, service.Name, alert.GetDescription())
customAlertProvider := alertProvider.ToCustomAlertProvider(service, alert, result, true)
log.Printf("[watchdog][handleAlertsToResolve] Sending %s serviceAlert because serviceAlert for service=%s with description='%s' has been RESOLVED", serviceAlert.Type, service.Name, serviceAlert.GetDescription())
customAlertProvider := alertProvider.ToCustomAlertProvider(service, serviceAlert, result, true)
// TODO: retry on error
-_, err := customAlertProvider.Send(service.Name, alert.GetDescription(), true)
+_, err := customAlertProvider.Send(service.Name, serviceAlert.GetDescription(), true)
if err != nil {
log.Printf("[watchdog][handleAlertsToResolve] Failed to send an alert for service=%s: %s", service.Name, err.Error())
log.Printf("[watchdog][handleAlertsToResolve] Failed to send an serviceAlert for service=%s: %s", service.Name, err.Error())
} else {
-if alert.Type == core.PagerDutyAlert {
-alert.ResolveKey = ""
+if serviceAlert.Type == alert.TypePagerDuty {
+serviceAlert.ResolveKey = ""
}
}
} else {
log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", alert.Type)
log.Printf("[watchdog][handleAlertsToResolve] Not sending serviceAlert of type=%s despite being RESOLVED, because the provider wasn't configured properly", serviceAlert.Type)
}
}
service.NumberOfFailuresInARow = 0
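
The practical effect of this refactor is that HandleAlerting no longer reaches into the config.Get() global; callers hand it the alerting configuration and debug flag directly, which is what lets a freshly reloaded configuration take effect. A minimal sketch of the new calling convention, built only from identifiers visible in this diff (the watchdog import path is an assumption):

package main

import (
	"github.com/TwinProduction/gatus/alerting"
	"github.com/TwinProduction/gatus/alerting/alert"
	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/watchdog"
)

func main() {
	enabled := true
	service := &core.Service{
		URL: "http://example.com",
		Alerts: []*alert.Alert{
			{
				Type:             alert.TypeCustom,
				Enabled:          &enabled,
				FailureThreshold: 2,
				SuccessThreshold: 3,
			},
		},
	}
	// The alerting configuration and debug flag are now passed in explicitly
	// instead of being read from the config.Get() global.
	watchdog.HandleAlerting(service, &core.Result{Success: false}, &alerting.Config{}, true)
}

With an empty alerting.Config, the provider lookup fails and the call simply logs that the alert was skipped, which is the same situation TestHandleAlertingWithBadAlertProvider below exercises.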

View File

@@ -5,6 +5,7 @@ import (
"testing"
"github.com/TwinProduction/gatus/alerting"
"github.com/TwinProduction/gatus/alerting/alert"
"github.com/TwinProduction/gatus/alerting/provider/custom"
"github.com/TwinProduction/gatus/alerting/provider/pagerduty"
"github.com/TwinProduction/gatus/config"
@@ -24,13 +25,12 @@ func TestHandleAlerting(t *testing.T) {
},
},
}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 2,
SuccessThreshold: 3,
@@ -41,50 +41,40 @@ func TestHandleAlerting(t *testing.T) {
}
verify(t, service, 0, 0, false, "The alert shouldn't start triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, false, "The alert shouldn't have triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 2, 0, true, "The alert should've triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 3, 0, true, "The alert should still be triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 4, 0, true, "The alert should still be triggered")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, true, "The alert should still be triggered (because service.Alerts[0].SuccessThreshold is 3)")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 2, true, "The alert should still be triggered (because service.Alerts[0].SuccessThreshold is 3)")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 3, false, "The alert should've been resolved")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 4, false, "The alert should no longer be triggered")
}
func TestHandleAlertingWhenAlertingConfigIsNil(t *testing.T) {
_ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
defer os.Clearenv()
-cfg := &config.Config{
-Debug: true,
-Alerting: nil,
-}
-config.Set(cfg)
-HandleAlerting(nil, nil)
+HandleAlerting(nil, nil, nil, true)
}
func TestHandleAlertingWithBadAlertProvider(t *testing.T) {
_ = os.Setenv("MOCK_ALERT_PROVIDER", "true")
defer os.Clearenv()
-cfg := &config.Config{
-Alerting: &alerting.Config{},
-}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 1,
SuccessThreshold: 1,
@@ -95,9 +85,9 @@ func TestHandleAlertingWithBadAlertProvider(t *testing.T) {
}
verify(t, service, 0, 0, false, "The alert shouldn't start triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, &alerting.Config{}, false)
verify(t, service, 1, 0, false, "The alert shouldn't have triggered")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, &alerting.Config{}, false)
verify(t, service, 2, 0, false, "The alert shouldn't have triggered, because the provider wasn't configured properly")
}
@@ -114,13 +104,12 @@ func TestHandleAlertingWhenTriggeredAlertIsAlmostResolvedButServiceStartFailingA
},
},
}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 2,
SuccessThreshold: 3,
@@ -132,7 +121,7 @@ func TestHandleAlertingWhenTriggeredAlertIsAlmostResolvedButServiceStartFailingA
}
// This test simulate an alert that was already triggered
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 2, 0, true, "The alert was already triggered at the beginning of this test")
}
@@ -149,14 +138,13 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedButSendOnResolvedIsFalse(t *t
},
},
}
-config.Set(cfg)
enabled := true
disabled := false
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 1,
SuccessThreshold: 1,
@@ -167,7 +155,7 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedButSendOnResolvedIsFalse(t *t
NumberOfFailuresInARow: 1,
}
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, false, "The alert should've been resolved")
}
@@ -183,13 +171,12 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedPagerDuty(t *testing.T) {
},
},
}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.PagerDutyAlert,
+Type: alert.TypePagerDuty,
Enabled: &enabled,
FailureThreshold: 1,
SuccessThreshold: 1,
@@ -200,10 +187,10 @@ func TestHandleAlertingWhenTriggeredAlertIsResolvedPagerDuty(t *testing.T) {
NumberOfFailuresInARow: 0,
}
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, true, "")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, false, "The alert should've been resolved")
}
@@ -220,13 +207,12 @@ func TestHandleAlertingWithProviderThatReturnsAnError(t *testing.T) {
},
},
}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 2,
SuccessThreshold: 2,
@@ -237,32 +223,32 @@ func TestHandleAlertingWithProviderThatReturnsAnError(t *testing.T) {
}
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, false, "")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 2, 0, false, "The alert should have failed to trigger, because the alert provider is returning an error")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 3, 0, false, "The alert should still not be triggered, because the alert provider is still returning an error")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 4, 0, false, "The alert should still not be triggered, because the alert provider is still returning an error")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 5, 0, true, "The alert should've been triggered because the alert provider is no longer returning an error")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, true, "The alert should've still been triggered")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 2, false, "The alert should've been resolved DESPITE THE ALERT PROVIDER RETURNING AN ERROR. See Alert.Triggered for further explanation.")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
// Make sure that everything's working as expected after a rough patch
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, false, "")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 2, 0, true, "The alert should have triggered")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, true, "The alert should still be triggered")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 2, false, "The alert should have been resolved")
}
@@ -279,13 +265,12 @@ func TestHandleAlertingWithProviderThatOnlyReturnsErrorOnResolve(t *testing.T) {
},
},
}
-config.Set(cfg)
enabled := true
service := &core.Service{
URL: "http://example.com",
-Alerts: []*core.Alert{
+Alerts: []*alert.Alert{
{
-Type: core.CustomAlert,
+Type: alert.TypeCustom,
Enabled: &enabled,
FailureThreshold: 1,
SuccessThreshold: 1,
@@ -295,27 +280,27 @@ func TestHandleAlertingWithProviderThatOnlyReturnsErrorOnResolve(t *testing.T) {
},
}
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, true, "")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, false, "")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, true, "")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "true")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, false, "")
_ = os.Setenv("MOCK_ALERT_PROVIDER_ERROR", "false")
// Make sure that everything's working as expected after a rough patch
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 1, 0, true, "")
-HandleAlerting(service, &core.Result{Success: false})
+HandleAlerting(service, &core.Result{Success: false}, cfg.Alerting, cfg.Debug)
verify(t, service, 2, 0, true, "")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 1, false, "")
-HandleAlerting(service, &core.Result{Success: true})
+HandleAlerting(service, &core.Result{Success: true}, cfg.Alerting, cfg.Debug)
verify(t, service, 0, 2, false, "")
}

View File

@@ -1,10 +1,12 @@
package watchdog
import (
"context"
"log"
"sync"
"time"
"github.com/TwinProduction/gatus/alerting"
"github.com/TwinProduction/gatus/config"
"github.com/TwinProduction/gatus/core"
"github.com/TwinProduction/gatus/metric"
@@ -15,48 +17,65 @@ var (
// monitoringMutex is used to prevent multiple services from being evaluated at the same time.
// Without this, conditions using response time may become inaccurate.
monitoringMutex sync.Mutex
+ctx context.Context
+cancelFunc context.CancelFunc
)
// Monitor loops over each services and starts a goroutine to monitor each services separately
func Monitor(cfg *config.Config) {
+ctx, cancelFunc = context.WithCancel(context.Background())
for _, service := range cfg.Services {
// To prevent multiple requests from running at the same time, we'll wait for a little bit before each iteration
time.Sleep(1111 * time.Millisecond)
-go monitor(service)
+go monitor(service, cfg.Alerting, cfg.DisableMonitoringLock, cfg.Metrics, cfg.Debug, ctx)
}
}
// monitor monitors a single service in a loop
-func monitor(service *core.Service) {
-cfg := config.Get()
+func monitor(service *core.Service, alertingConfig *alerting.Config, disableMonitoringLock, enabledMetrics, debug bool, ctx context.Context) {
+// Run it immediately on start
+execute(service, alertingConfig, disableMonitoringLock, enabledMetrics, debug)
+// Loop for the next executions
for {
-if !cfg.DisableMonitoringLock {
-// By placing the lock here, we prevent multiple services from being monitored at the exact same time, which
-// could cause performance issues and return inaccurate results
-monitoringMutex.Lock()
+select {
+case <-ctx.Done():
+log.Printf("[watchdog][monitor] Canceling current execution of group=%s; service=%s", service.Group, service.Name)
+return
+case <-time.After(service.Interval):
+execute(service, alertingConfig, disableMonitoringLock, enabledMetrics, debug)
}
-if cfg.Debug {
-log.Printf("[watchdog][monitor] Monitoring group=%s; service=%s", service.Group, service.Name)
-}
-result := service.EvaluateHealth()
+}
+}
+func execute(service *core.Service, alertingConfig *alerting.Config, disableMonitoringLock, enabledMetrics, debug bool) {
+if !disableMonitoringLock {
+// By placing the lock here, we prevent multiple services from being monitored at the exact same time, which
+// could cause performance issues and return inaccurate results
+monitoringMutex.Lock()
+}
+if debug {
+log.Printf("[watchdog][execute] Monitoring group=%s; service=%s", service.Group, service.Name)
+}
+result := service.EvaluateHealth()
+if enabledMetrics {
metric.PublishMetricsForService(service, result)
-UpdateServiceStatuses(service, result)
-log.Printf(
-"[watchdog][monitor] Monitored group=%s; service=%s; success=%v; errors=%d; duration=%s",
-service.Group,
-service.Name,
-result.Success,
-len(result.Errors),
-result.Duration.Round(time.Millisecond),
-)
-HandleAlerting(service, result)
-if cfg.Debug {
-log.Printf("[watchdog][monitor] Waiting for interval=%s before monitoring group=%s service=%s again", service.Interval, service.Group, service.Name)
-}
-if !cfg.DisableMonitoringLock {
-monitoringMutex.Unlock()
-}
-time.Sleep(service.Interval)
}
+UpdateServiceStatuses(service, result)
+log.Printf(
+"[watchdog][execute] Monitored group=%s; service=%s; success=%v; errors=%d; duration=%s",
+service.Group,
+service.Name,
+result.Success,
+len(result.Errors),
+result.Duration.Round(time.Millisecond),
+)
+HandleAlerting(service, result, alertingConfig, debug)
+if debug {
+log.Printf("[watchdog][execute] Waiting for interval=%s before monitoring group=%s service=%s again", service.Interval, service.Group, service.Name)
+}
+if !disableMonitoringLock {
+monitoringMutex.Unlock()
+}
}
@@ -64,3 +83,8 @@ func monitor(service *core.Service) {
func UpdateServiceStatuses(service *core.Service, result *core.Result) {
storage.Get().Insert(service, result)
}
+// Shutdown stops monitoring all services
+func Shutdown() {
+cancelFunc()
+}
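
Shutdown is the hook that makes the automatic configuration reload in this commit's title possible: cancelling the shared context causes every monitor goroutine to return from its select loop, after which monitoring can be started again with a freshly parsed configuration. The code that watches the configuration file lives in files not shown in this excerpt, so the loop below is only an illustrative sketch of how the two calls fit together; the channel, its producer, and the watchdog import path are assumptions, while Shutdown and Monitor come straight from this diff.

package reload

import (
	"github.com/TwinProduction/gatus/config"
	"github.com/TwinProduction/gatus/watchdog"
)

// RestartOnUpdate restarts monitoring every time a freshly parsed
// configuration arrives on updatedConfigs. Whatever watches the
// configuration file and sends on this channel is hypothetical here.
func RestartOnUpdate(updatedConfigs <-chan *config.Config) {
	for cfg := range updatedConfigs {
		// Cancel the shared context; each monitor goroutine returns as soon
		// as it observes ctx.Done() in its select loop.
		watchdog.Shutdown()
		// Spin the monitoring goroutines back up with the new configuration.
		watchdog.Monitor(cfg)
	}
}

Note that Shutdown assumes Monitor has already been called at least once, since Monitor is where the shared context and cancelFunc are created.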