storage/store/common/errors.go (new file)
@@ -0,0 +1,8 @@
package common

import "errors"

var (
    ErrEndpointNotFound = errors.New("endpoint not found")                     // When an endpoint does not exist in the store
    ErrInvalidTimeRange = errors.New("'from' cannot be more recent than 'to'") // When an invalid time range is provided
)
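These are sentinel errors, so callers are expected to match them with errors.Is rather than by comparing strings. A minimal sketch of a caller (the surrounding handler logic is hypothetical; GetEndpointStatusByKey is defined in the memory store below):

    status, err := store.GetEndpointStatusByKey(key, params)
    if errors.Is(err, common.ErrEndpointNotFound) {
        // The endpoint was never monitored or has been deleted; e.g. respond with 404
    } else if err != nil {
        // Unexpected failure
    }
    _ = status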
storage/store/common/limits.go (new file)
@@ -0,0 +1,9 @@
package common

const (
    // MaximumNumberOfResults is the maximum number of results that an endpoint can have
    MaximumNumberOfResults = 100

    // MaximumNumberOfEvents is the maximum number of events that an endpoint can have
    MaximumNumberOfEvents = 50
)
storage/store/common/paging/paging.go (new file)
@@ -0,0 +1,28 @@
package paging

// EndpointStatusParams represents all parameters that can be used for paging purposes
type EndpointStatusParams struct {
    EventsPage      int // Number of the event page
    EventsPageSize  int // Size of the event page
    ResultsPage     int // Number of the result page
    ResultsPageSize int // Size of the result page
}

// NewEndpointStatusParams creates a new EndpointStatusParams
func NewEndpointStatusParams() *EndpointStatusParams {
    return &EndpointStatusParams{}
}

// WithEvents sets the values for EventsPage and EventsPageSize
func (params *EndpointStatusParams) WithEvents(page, pageSize int) *EndpointStatusParams {
    params.EventsPage = page
    params.EventsPageSize = pageSize
    return params
}

// WithResults sets the values for ResultsPage and ResultsPageSize
func (params *EndpointStatusParams) WithResults(page, pageSize int) *EndpointStatusParams {
    params.ResultsPage = page
    params.ResultsPageSize = pageSize
    return params
}
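Since both With* methods mutate and return the receiver, parameters can be chained fluently:

    params := paging.NewEndpointStatusParams().WithEvents(1, 25).WithResults(1, 100)

Pages left unset default to 0, which the in-memory store's ShallowCopyEndpointStatus (further below) treats as an invalid page and maps to empty Events/Results slices.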
storage/store/common/paging/paging_test.go (new file)
@@ -0,0 +1,72 @@
package paging

import "testing"

func TestNewEndpointStatusParams(t *testing.T) {
    type Scenario struct {
        Name                    string
        Params                  *EndpointStatusParams
        ExpectedEventsPage      int
        ExpectedEventsPageSize  int
        ExpectedResultsPage     int
        ExpectedResultsPageSize int
    }
    scenarios := []Scenario{
        {
            Name:                    "empty-params",
            Params:                  NewEndpointStatusParams(),
            ExpectedEventsPage:      0,
            ExpectedEventsPageSize:  0,
            ExpectedResultsPage:     0,
            ExpectedResultsPageSize: 0,
        },
        {
            Name:                    "with-events-page-2-size-7",
            Params:                  NewEndpointStatusParams().WithEvents(2, 7),
            ExpectedEventsPage:      2,
            ExpectedEventsPageSize:  7,
            ExpectedResultsPage:     0,
            ExpectedResultsPageSize: 0,
        },
        {
            Name:                    "with-events-page-4-size-3-uptime",
            Params:                  NewEndpointStatusParams().WithEvents(4, 3),
            ExpectedEventsPage:      4,
            ExpectedEventsPageSize:  3,
            ExpectedResultsPage:     0,
            ExpectedResultsPageSize: 0,
        },
        {
            Name:                    "with-results-page-1-size-20-uptime",
            Params:                  NewEndpointStatusParams().WithResults(1, 20),
            ExpectedEventsPage:      0,
            ExpectedEventsPageSize:  0,
            ExpectedResultsPage:     1,
            ExpectedResultsPageSize: 20,
        },
        {
            Name:                    "with-results-page-2-size-10-events-page-3-size-50",
            Params:                  NewEndpointStatusParams().WithResults(2, 10).WithEvents(3, 50),
            ExpectedEventsPage:      3,
            ExpectedEventsPageSize:  50,
            ExpectedResultsPage:     2,
            ExpectedResultsPageSize: 10,
        },
    }
    for _, scenario := range scenarios {
        t.Run(scenario.Name, func(t *testing.T) {
            if scenario.Params.EventsPage != scenario.ExpectedEventsPage {
                t.Errorf("expected EventsPage to be %d, was %d", scenario.ExpectedEventsPage, scenario.Params.EventsPage)
            }
            if scenario.Params.EventsPageSize != scenario.ExpectedEventsPageSize {
                t.Errorf("expected EventsPageSize to be %d, was %d", scenario.ExpectedEventsPageSize, scenario.Params.EventsPageSize)
            }
            if scenario.Params.ResultsPage != scenario.ExpectedResultsPage {
                t.Errorf("expected ResultsPage to be %d, was %d", scenario.ExpectedResultsPage, scenario.Params.ResultsPage)
            }
            if scenario.Params.ResultsPageSize != scenario.ExpectedResultsPageSize {
                t.Errorf("expected ResultsPageSize to be %d, was %d", scenario.ExpectedResultsPageSize, scenario.Params.ResultsPageSize)
            }
        })
    }
}
storage/store/memory/memory.go (new file)
@@ -0,0 +1,222 @@
package memory

import (
    "sort"
    "sync"
    "time"

    "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
    "github.com/TwiN/gocache/v2"
)

// Store that leverages gocache
type Store struct {
    sync.RWMutex

    cache *gocache.Cache
}

// NewStore creates a new store using gocache.Cache
//
// This store holds everything in memory and does not persist anything to disk.
func NewStore() (*Store, error) {
    store := &Store{
        cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
    }
    return store, nil
}
// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error) {
    endpointStatuses := s.cache.GetAll()
    pagedEndpointStatuses := make([]*endpoint.Status, 0, len(endpointStatuses))
    for _, v := range endpointStatuses {
        pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(v.(*endpoint.Status), params))
    }
    sort.Slice(pagedEndpointStatuses, func(i, j int) bool {
        return pagedEndpointStatuses[i].Key < pagedEndpointStatuses[j].Key
    })
    return pagedEndpointStatuses, nil
}

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
    return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
}

// GetEndpointStatusByKey returns the endpoint status for a given key
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil {
        return nil, common.ErrEndpointNotFound
    }
    return ShallowCopyEndpointStatus(endpointStatus.(*endpoint.Status), params), nil
}
// GetUptimeByKey returns the uptime percentage during a time range
func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error) {
    if from.After(to) {
        return 0, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return 0, common.ErrEndpointNotFound
    }
    successfulExecutions := uint64(0)
    totalExecutions := uint64(0)
    current := from
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        successfulExecutions += hourlyStats.SuccessfulExecutions
        totalExecutions += hourlyStats.TotalExecutions
        current = current.Add(time.Hour)
    }
    if totalExecutions == 0 {
        return 0, nil
    }
    return float64(successfulExecutions) / float64(totalExecutions), nil
}
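Because successes and totals are summed across all hourly buckets before dividing, busy hours weigh more than quiet ones; this is a weighted ratio over executions, not an average of hourly ratios. A worked example with made-up numbers:

    // Hour A: 9 successful out of 10 executions; hour B: 20 successful out of 100 executions.
    // Summing first gives (9+20)/(10+100) ≈ 0.264, whereas averaging the two
    // hourly ratios would have given (0.9+0.2)/2 = 0.55.
    uptime := float64(9+20) / float64(10+100) // 0.2636...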
// GetAverageResponseTimeByKey returns the average response time in milliseconds (value) during a time range
func (s *Store) GetAverageResponseTimeByKey(key string, from, to time.Time) (int, error) {
    if from.After(to) {
        return 0, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return 0, common.ErrEndpointNotFound
    }
    current := from
    var totalExecutions, totalResponseTime uint64
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        totalExecutions += hourlyStats.TotalExecutions
        totalResponseTime += hourlyStats.TotalExecutionsResponseTime
        current = current.Add(time.Hour)
    }
    if totalExecutions == 0 {
        return 0, nil
    }
    return int(float64(totalResponseTime) / float64(totalExecutions)), nil
}

// GetHourlyAverageResponseTimeByKey returns a map of hourly (key) average response time in milliseconds (value) during a time range
func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error) {
    if from.After(to) {
        return nil, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return nil, common.ErrEndpointNotFound
    }
    hourlyAverageResponseTimes := make(map[int64]int)
    current := from
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        hourlyAverageResponseTimes[hourlyUnixTimestamp] = int(float64(hourlyStats.TotalExecutionsResponseTime) / float64(hourlyStats.TotalExecutions))
        current = current.Add(time.Hour)
    }
    return hourlyAverageResponseTimes, nil
}
// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
    key := ep.Key()
    s.Lock()
    status, exists := s.cache.Get(key)
    if !exists {
        status = endpoint.NewStatus(ep.Group, ep.Name)
        status.(*endpoint.Status).Events = append(status.(*endpoint.Status).Events, &endpoint.Event{
            Type:      endpoint.EventStart,
            Timestamp: time.Now(),
        })
    }
    AddResult(status.(*endpoint.Status), result)
    s.cache.Set(key, status)
    s.Unlock()
    return nil
}

// DeleteAllEndpointStatusesNotInKeys removes all Status entries that are not within the keys provided
func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
    var keysToDelete []string
    for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
        shouldDelete := true
        for _, key := range keys {
            if existingKey == key {
                shouldDelete = false
                break
            }
        }
        if shouldDelete {
            keysToDelete = append(keysToDelete, existingKey)
        }
    }
    return s.cache.DeleteAll(keysToDelete)
}
// GetTriggeredEndpointAlert returns whether the given alert has been triggered for the specified endpoint,
// as well as the necessary information to resolve it
//
// Always returns that the alert does not exist for the in-memory store since it does not support persistence across restarts
func (s *Store) GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error) {
    return false, "", 0, nil
}

// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
// Used for persistence of triggered alerts across application restarts
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
    return nil
}

// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
    return nil
}

// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
// configurations are not provided in the checksums list.
// This prevents triggered alerts that have been removed or modified from lingering in the database.
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int {
    return 0
}

// Clear deletes everything from the store
func (s *Store) Clear() {
    s.cache.Clear()
}

// Save does nothing for the in-memory store, since there is nothing to persist
func (s *Store) Save() error {
    return nil
}

// Close does nothing, because there's nothing to close
func (s *Store) Close() {}
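Taken together, a minimal end-to-end use of this store looks like the following sketch (the endpoint name and group are hypothetical; imports of time, endpoint, memory, and paging are assumed):

    store, _ := memory.NewStore()
    defer store.Close()
    ep := &endpoint.Endpoint{Name: "frontend", Group: "core"} // hypothetical endpoint
    _ = store.Insert(ep, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 120 * time.Millisecond})
    status, _ := store.GetEndpointStatus("core", "frontend", paging.NewEndpointStatusParams().WithResults(1, 20))
    _ = status // status.Results now contains the single inserted result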
storage/store/memory/memory_test.go (new file)
@@ -0,0 +1,135 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

var (
    firstCondition  = endpoint.Condition("[STATUS] == 200")
    secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
    thirdCondition  = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

    now = time.Now()

    testEndpoint = endpoint.Endpoint{
        Name:                    "name",
        Group:                   "group",
        URL:                     "https://example.org/what/ever",
        Method:                  "GET",
        Body:                    "body",
        Interval:                30 * time.Second,
        Conditions:              []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
        Alerts:                  nil,
        NumberOfFailuresInARow:  0,
        NumberOfSuccessesInARow: 0,
    }
    testSuccessfulResult = endpoint.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Errors:                nil,
        Connected:             true,
        Success:               true,
        Timestamp:             now,
        Duration:              150 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*endpoint.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   true,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   true,
            },
        },
    }
    testUnsuccessfulResult = endpoint.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Errors:                []string{"error-1", "error-2"},
        Connected:             true,
        Success:               false,
        Timestamp:             now,
        Duration:              750 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*endpoint.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   false,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   false,
            },
        },
    }
)

// Note that there are much more extensive tests in /storage/store/store_test.go.
// This test is simply an extra sanity check
func TestStore_SanityCheck(t *testing.T) {
    store, _ := NewStore()
    defer store.Close()
    store.Insert(&testEndpoint, &testSuccessfulResult)
    endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
    if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
        t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
    }
    store.Insert(&testEndpoint, &testUnsuccessfulResult)
    // Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
    endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
    if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
        t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
    }
    if hourlyAverageResponseTime, err := store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); err != nil {
        t.Errorf("expected no error, got %v", err)
    } else if len(hourlyAverageResponseTime) != 1 {
        t.Errorf("expected 1 hour to have had a result in the past 24 hours, got %d", len(hourlyAverageResponseTime))
    }
    if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); uptime != 0.5 {
        t.Errorf("expected uptime of last 24h to be 0.5, got %f", uptime)
    }
    if averageResponseTime, _ := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); averageResponseTime != 450 {
        t.Errorf("expected average response time of last 24h to be 450, got %d", averageResponseTime)
    }
    ss, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 20))
    if ss == nil {
        t.Fatalf("Store should've had key '%s', but didn't", testEndpoint.Key())
    }
    if len(ss.Events) != 3 {
        t.Errorf("Endpoint '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
    }
    if len(ss.Results) != 2 {
        t.Errorf("Endpoint '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
    }
    if deleted := store.DeleteAllEndpointStatusesNotInKeys([]string{}); deleted != 1 {
        t.Errorf("%d entries should've been deleted, got %d", 1, deleted)
    }
}

func TestStore_Save(t *testing.T) {
    store, err := NewStore()
    if err != nil {
        t.Fatal("expected no error, got", err.Error())
    }
    err = store.Save()
    if err != nil {
        t.Fatal("expected no error, got", err.Error())
    }
    store.Clear()
    store.Close()
}
storage/store/memory/uptime.go (new file)
@@ -0,0 +1,43 @@
package memory

import (
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

const (
    uptimeCleanUpThreshold = 32 * 24
    uptimeRetention        = 30 * 24 * time.Hour
)

// processUptimeAfterResult processes the result by extracting the relevant statistics from the result
// and recalculating the uptime if necessary
func processUptimeAfterResult(uptime *endpoint.Uptime, result *endpoint.Result) {
    if uptime.HourlyStatistics == nil {
        uptime.HourlyStatistics = make(map[int64]*endpoint.HourlyUptimeStatistics)
    }
    unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
    hourlyStats := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
    if hourlyStats == nil {
        hourlyStats = &endpoint.HourlyUptimeStatistics{}
        uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
    }
    if result.Success {
        hourlyStats.SuccessfulExecutions++
    }
    hourlyStats.TotalExecutions++
    hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
    // Clean up only when we're starting to have too many useless keys
    // Note that this is only triggered when there are more entries than there should be after
    // 32 days, despite the fact that we are deleting everything that's older than 30 days.
    // This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 30 days.
    if len(uptime.HourlyStatistics) > uptimeCleanUpThreshold {
        retentionCutoff := time.Now().Add(-(uptimeRetention + time.Hour)).Unix()
        for hourlyUnixTimestamp := range uptime.HourlyStatistics {
            if retentionCutoff > hourlyUnixTimestamp {
                delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
            }
        }
    }
}
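Every result is folded into the bucket for the hour its timestamp falls in, so two results minutes apart usually share a bucket. A small self-contained sketch of the keying logic, using arbitrary example timestamps:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        t1 := time.Date(2024, 1, 2, 15, 5, 0, 0, time.UTC)
        t2 := t1.Add(10 * time.Minute)
        // Both timestamps truncate to 15:00 UTC, so they land in the same hourly bucket
        fmt.Println(t1.Truncate(time.Hour).Unix() == t2.Truncate(time.Hour).Unix()) // true
    }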
storage/store/memory/uptime_bench_test.go (new file)
@@ -0,0 +1,26 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

func BenchmarkProcessUptimeAfterResult(b *testing.B) {
    uptime := endpoint.NewUptime()
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12000 days ago
    timestamp := now.Add(-12000 * 24 * time.Hour)
    for n := 0; n < b.N; n++ {
        processUptimeAfterResult(uptime, &endpoint.Result{
            Duration:  18 * time.Millisecond,
            Success:   n%15 == 0,
            Timestamp: timestamp,
        })
        // Simulate an endpoint with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
    b.ReportAllocs()
}
storage/store/memory/uptime_test.go (new file)
@@ -0,0 +1,72 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

func TestProcessUptimeAfterResult(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    status := endpoint.NewStatus(ep.Group, ep.Name)
    uptime := status.Uptime

    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
}

func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    status := endpoint.NewStatus(ep.Group, ep.Name)
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12 days ago
    timestamp := now.Add(-12 * 24 * time.Hour)
    for timestamp.Unix() <= now.Unix() {
        AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true})
        if len(status.Uptime.HourlyStatistics) > uptimeCleanUpThreshold {
            t.Errorf("At no point in time should there be more than %d entries in status.Uptime.HourlyStatistics, but there are %d", uptimeCleanUpThreshold, len(status.Uptime.HourlyStatistics))
        }
        // Simulate an endpoint with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
}

func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *endpoint.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
    if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
        t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
    }
    if hourlyUptimeStatistics.TotalExecutions != expectedTotalExecutions {
        t.Error("TotalExecutions should've been", expectedTotalExecutions, "got", hourlyUptimeStatistics.TotalExecutions)
    }
    if hourlyUptimeStatistics.SuccessfulExecutions != expectedSuccessfulExecutions {
        t.Error("SuccessfulExecutions should've been", expectedSuccessfulExecutions, "got", hourlyUptimeStatistics.SuccessfulExecutions)
    }
}
storage/store/memory/util.go (new file)
@@ -0,0 +1,81 @@
package memory

import (
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

// ShallowCopyEndpointStatus returns a shallow copy of a Status with only the results
// within the range defined by the page and pageSize parameters
func ShallowCopyEndpointStatus(ss *endpoint.Status, params *paging.EndpointStatusParams) *endpoint.Status {
    shallowCopy := &endpoint.Status{
        Name:   ss.Name,
        Group:  ss.Group,
        Key:    ss.Key,
        Uptime: endpoint.NewUptime(),
    }
    numberOfResults := len(ss.Results)
    resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
    if resultsStart < 0 || resultsEnd < 0 {
        shallowCopy.Results = []*endpoint.Result{}
    } else {
        shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
    }
    numberOfEvents := len(ss.Events)
    eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
    if eventsStart < 0 || eventsEnd < 0 {
        shallowCopy.Events = []*endpoint.Event{}
    } else {
        shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
    }
    return shallowCopy
}

func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) {
    if page < 1 || pageSize < 0 {
        return -1, -1
    }
    start := numberOfResults - (page * pageSize)
    end := numberOfResults - ((page - 1) * pageSize)
    if start > numberOfResults {
        start = -1
    } else if start < 0 {
        start = 0
    }
    if end > numberOfResults {
        end = numberOfResults
    }
    return start, end
}
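Pages are counted from the end of the slice, so page 1 always covers the most recent entries. A worked trace of getStartAndEndIndex with 25 stored results and a page size of 10, matching the expectations in util_test.go below:

    // numberOfResults=25, pageSize=10
    // page 1: start = 25-10 = 15, end = 25-0  = 25 -> Results[15:25], the 10 newest
    // page 2: start = 25-20 = 5,  end = 25-10 = 15 -> Results[5:15]
    // page 3: start = 25-30 = -5, clamped to 0; end = 25-20 = 5 -> Results[0:5], the 5 oldest
    // page 4: start = 25-40, clamped to 0; end = 25-30 = -5 stays negative,
    //         so the caller returns an empty slice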
// AddResult adds a Result to Status.Results and makes sure that there are
// no more than MaximumNumberOfResults results in the Results slice
func AddResult(ss *endpoint.Status, result *endpoint.Result) {
    if ss == nil {
        return
    }
    if len(ss.Results) > 0 {
        // Check if there's any change since the last result
        if ss.Results[len(ss.Results)-1].Success != result.Success {
            ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
            if len(ss.Events) > common.MaximumNumberOfEvents {
                // Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
                // more than one extra element, we can get rid of all of them at once and thus return the slice to a
                // length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
                ss.Events = ss.Events[len(ss.Events)-common.MaximumNumberOfEvents:]
            }
        }
    } else {
        // This is the first result, so we need to add the first healthy/unhealthy event
        ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
    }
    ss.Results = append(ss.Results, result)
    if len(ss.Results) > common.MaximumNumberOfResults {
        // Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
        // than one extra element, we can get rid of all of them at once and thus return the slice to a length of
        // MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
        ss.Results = ss.Results[len(ss.Results)-common.MaximumNumberOfResults:]
    }
    processUptimeAfterResult(ss.Uptime, result)
}
storage/store/memory/util_bench_test.go (new file)
@@ -0,0 +1,21 @@
package memory

import (
    "testing"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func BenchmarkShallowCopyEndpointStatus(b *testing.B) {
    ep := &testEndpoint
    status := endpoint.NewStatus(ep.Group, ep.Name)
    for i := 0; i < common.MaximumNumberOfResults; i++ {
        AddResult(status, &testSuccessfulResult)
    }
    for n := 0; n < b.N; n++ {
        ShallowCopyEndpointStatus(status, paging.NewEndpointStatusParams().WithResults(1, 20))
    }
    b.ReportAllocs()
}
storage/store/memory/util_test.go (new file)
@@ -0,0 +1,66 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func TestAddResult(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
    for i := 0; i < (common.MaximumNumberOfResults+common.MaximumNumberOfEvents)*2; i++ {
        AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: time.Now()})
    }
    if len(endpointStatus.Results) != common.MaximumNumberOfResults {
        t.Errorf("expected endpointStatus.Results to not exceed a length of %d", common.MaximumNumberOfResults)
    }
    if len(endpointStatus.Events) != common.MaximumNumberOfEvents {
        t.Errorf("expected endpointStatus.Events to not exceed a length of %d", common.MaximumNumberOfEvents)
    }
    // Try to add to a nil endpointStatus; this should be a no-op
    AddResult(nil, &endpoint.Result{Timestamp: time.Now()})
}

func TestShallowCopyEndpointStatus(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
    ts := time.Now().Add(-25 * time.Hour)
    for i := 0; i < 25; i++ {
        AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: ts})
        ts = ts.Add(time.Hour)
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, -1)).Results) != 0 {
        t.Error("expected to have 0 results")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 1)).Results) != 1 {
        t.Error("expected to have 1 result")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(5, 0)).Results) != 0 {
        t.Error("expected to have 0 results")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, 20)).Results) != 0 {
        t.Error("expected to have 0 results, because the page was invalid")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, -1)).Results) != 0 {
        t.Error("expected to have 0 results, because the page size was invalid")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 10)).Results) != 10 {
        t.Error("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(2, 10)).Results) != 10 {
        t.Error("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(3, 10)).Results) != 5 {
        t.Error("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(4, 10)).Results) != 0 {
        t.Error("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 50)).Results) != 25 {
        t.Error("expected to have 25 results, because there are only 25 results")
    }
}
storage/store/sql/specific_postgres.go (new file)
@@ -0,0 +1,85 @@
package sql

func (s *Store) createPostgresSchema() error {
    _, err := s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoints (
            endpoint_id    BIGSERIAL PRIMARY KEY,
            endpoint_key   TEXT UNIQUE,
            endpoint_name  TEXT NOT NULL,
            endpoint_group TEXT NOT NULL,
            UNIQUE(endpoint_name, endpoint_group)
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_events (
            endpoint_event_id BIGSERIAL PRIMARY KEY,
            endpoint_id       BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            event_type        TEXT NOT NULL,
            event_timestamp   TIMESTAMP NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_results (
            endpoint_result_id     BIGSERIAL PRIMARY KEY,
            endpoint_id            BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            success                BOOLEAN NOT NULL,
            errors                 TEXT NOT NULL,
            connected              BOOLEAN NOT NULL,
            status                 BIGINT NOT NULL,
            dns_rcode              TEXT NOT NULL,
            certificate_expiration BIGINT NOT NULL,
            domain_expiration      BIGINT NOT NULL,
            hostname               TEXT NOT NULL,
            ip                     TEXT NOT NULL,
            duration               BIGINT NOT NULL,
            timestamp              TIMESTAMP NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_result_conditions (
            endpoint_result_condition_id BIGSERIAL PRIMARY KEY,
            endpoint_result_id           BIGINT NOT NULL REFERENCES endpoint_results(endpoint_result_id) ON DELETE CASCADE,
            condition                    TEXT NOT NULL,
            success                      BOOLEAN NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_uptimes (
            endpoint_uptime_id    BIGSERIAL PRIMARY KEY,
            endpoint_id           BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            hour_unix_timestamp   BIGINT NOT NULL,
            total_executions      BIGINT NOT NULL,
            successful_executions BIGINT NOT NULL,
            total_response_time   BIGINT NOT NULL,
            UNIQUE(endpoint_id, hour_unix_timestamp)
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_alerts_triggered (
            endpoint_alert_trigger_id    BIGSERIAL PRIMARY KEY,
            endpoint_id                  BIGINT NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            configuration_checksum       TEXT NOT NULL,
            resolve_key                  TEXT NOT NULL,
            number_of_successes_in_a_row INTEGER NOT NULL,
            UNIQUE(endpoint_id, configuration_checksum)
        )
    `)
    // Silent table modifications TODO: Remove this in v6.0.0
    _, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD IF NOT EXISTS domain_expiration BIGINT NOT NULL DEFAULT 0`)
    return err
}
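The UNIQUE(endpoint_id, hour_unix_timestamp) constraint on endpoint_uptimes is what makes hourly uptime entries accumulable in place. The actual queries live in the suppressed sql.go diff, so the following is only a hypothetical sketch of what an upsert against this Postgres schema could look like (standard ON CONFLICT syntax; all variable names are assumptions):

    _, err := db.Exec(`
        INSERT INTO endpoint_uptimes (endpoint_id, hour_unix_timestamp, total_executions, successful_executions, total_response_time)
        VALUES ($1, $2, 1, $3, $4)
        ON CONFLICT (endpoint_id, hour_unix_timestamp) DO UPDATE SET
            total_executions      = endpoint_uptimes.total_executions + 1,
            successful_executions = endpoint_uptimes.successful_executions + excluded.successful_executions,
            total_response_time   = endpoint_uptimes.total_response_time + excluded.total_response_time
    `, endpointID, hourUnixTimestamp, successfulExecutions, responseTimeMs)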
storage/store/sql/specific_sqlite.go (new file)
@@ -0,0 +1,85 @@
package sql

func (s *Store) createSQLiteSchema() error {
    _, err := s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoints (
            endpoint_id    INTEGER PRIMARY KEY,
            endpoint_key   TEXT UNIQUE,
            endpoint_name  TEXT NOT NULL,
            endpoint_group TEXT NOT NULL,
            UNIQUE(endpoint_name, endpoint_group)
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_events (
            endpoint_event_id INTEGER PRIMARY KEY,
            endpoint_id       INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            event_type        TEXT NOT NULL,
            event_timestamp   TIMESTAMP NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_results (
            endpoint_result_id     INTEGER PRIMARY KEY,
            endpoint_id            INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            success                INTEGER NOT NULL,
            errors                 TEXT NOT NULL,
            connected              INTEGER NOT NULL,
            status                 INTEGER NOT NULL,
            dns_rcode              TEXT NOT NULL,
            certificate_expiration INTEGER NOT NULL,
            domain_expiration      INTEGER NOT NULL,
            hostname               TEXT NOT NULL,
            ip                     TEXT NOT NULL,
            duration               INTEGER NOT NULL,
            timestamp              TIMESTAMP NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_result_conditions (
            endpoint_result_condition_id INTEGER PRIMARY KEY,
            endpoint_result_id           INTEGER NOT NULL REFERENCES endpoint_results(endpoint_result_id) ON DELETE CASCADE,
            condition                    TEXT NOT NULL,
            success                      INTEGER NOT NULL
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_uptimes (
            endpoint_uptime_id    INTEGER PRIMARY KEY,
            endpoint_id           INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            hour_unix_timestamp   INTEGER NOT NULL,
            total_executions      INTEGER NOT NULL,
            successful_executions INTEGER NOT NULL,
            total_response_time   INTEGER NOT NULL,
            UNIQUE(endpoint_id, hour_unix_timestamp)
        )
    `)
    if err != nil {
        return err
    }
    _, err = s.db.Exec(`
        CREATE TABLE IF NOT EXISTS endpoint_alerts_triggered (
            endpoint_alert_trigger_id    INTEGER PRIMARY KEY,
            endpoint_id                  INTEGER NOT NULL REFERENCES endpoints(endpoint_id) ON DELETE CASCADE,
            configuration_checksum       TEXT NOT NULL,
            resolve_key                  TEXT NOT NULL,
            number_of_successes_in_a_row INTEGER NOT NULL,
            UNIQUE(endpoint_id, configuration_checksum)
        )
    `)
    // Silent table modifications TODO: Remove this in v6.0.0
    _, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD domain_expiration INTEGER NOT NULL DEFAULT 0`)
    return err
}
storage/store/sql/sql.go (new file, 1084 lines)
(file diff suppressed because it is too large)
853
storage/store/sql/sql_test.go
Normal file
853
storage/store/sql/sql_test.go
Normal file
@ -0,0 +1,853 @@
|
||||
package sql
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/TwiN/gatus/v5/alerting/alert"
|
||||
"github.com/TwiN/gatus/v5/config/endpoint"
|
||||
"github.com/TwiN/gatus/v5/storage/store/common"
|
||||
"github.com/TwiN/gatus/v5/storage/store/common/paging"
|
||||
)
|
||||
|
||||
var (
|
||||
firstCondition = endpoint.Condition("[STATUS] == 200")
|
||||
secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
|
||||
thirdCondition = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")
|
||||
|
||||
now = time.Now()
|
||||
|
||||
testEndpoint = endpoint.Endpoint{
|
||||
Name: "name",
|
||||
Group: "group",
|
||||
URL: "https://example.org/what/ever",
|
||||
Method: "GET",
|
||||
Body: "body",
|
||||
Interval: 30 * time.Second,
|
||||
Conditions: []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
|
||||
Alerts: nil,
|
||||
NumberOfFailuresInARow: 0,
|
||||
NumberOfSuccessesInARow: 0,
|
||||
}
|
||||
testSuccessfulResult = endpoint.Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Errors: nil,
|
||||
Connected: true,
|
||||
Success: true,
|
||||
Timestamp: now,
|
||||
Duration: 150 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*endpoint.ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
testUnsuccessfulResult = endpoint.Result{
|
||||
Hostname: "example.org",
|
||||
IP: "127.0.0.1",
|
||||
HTTPStatus: 200,
|
||||
Errors: []string{"error-1", "error-2"},
|
||||
Connected: true,
|
||||
Success: false,
|
||||
Timestamp: now,
|
||||
Duration: 750 * time.Millisecond,
|
||||
CertificateExpiration: 10 * time.Hour,
|
||||
ConditionResults: []*endpoint.ConditionResult{
|
||||
{
|
||||
Condition: "[STATUS] == 200",
|
||||
Success: true,
|
||||
},
|
||||
{
|
||||
Condition: "[RESPONSE_TIME] < 500",
|
||||
Success: false,
|
||||
},
|
||||
{
|
||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
||||
Success: false,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func TestNewStore(t *testing.T) {
|
||||
if _, err := NewStore("", t.TempDir()+"/TestNewStore.db", false); !errors.Is(err, ErrDatabaseDriverNotSpecified) {
|
||||
t.Error("expected error due to blank driver parameter")
|
||||
}
|
||||
if _, err := NewStore("sqlite", "", false); !errors.Is(err, ErrPathNotSpecified) {
|
||||
t.Error("expected error due to blank path parameter")
|
||||
}
|
||||
if store, err := NewStore("sqlite", t.TempDir()+"/TestNewStore.db", true); err != nil {
|
||||
t.Error("shouldn't have returned any error, got", err.Error())
|
||||
} else {
|
||||
_ = store.db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
|
||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpOldUptimeEntriesProperly.db", false)
|
||||
defer store.Close()
|
||||
now := time.Now().Truncate(time.Hour)
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
|
||||
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})
|
||||
|
||||
tx, _ := store.db.Begin()
|
||||
oldest, _ := store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
|
||||
_ = tx.Commit()
|
||||
if oldest.Truncate(time.Hour) != 5*time.Hour {
|
||||
t.Errorf("oldest endpoint uptime entry should've been ~5 hours old, was %s", oldest)
|
||||
}
|
||||
|
||||
// The oldest cache entry should remain at ~5 hours old, because this entry is more recent
|
||||
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})
|
||||
|
||||
tx, _ = store.db.Begin()
|
||||
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
|
||||
_ = tx.Commit()
|
||||
if oldest.Truncate(time.Hour) != 5*time.Hour {
|
||||
t.Errorf("oldest endpoint uptime entry should've been ~5 hours old, was %s", oldest)
|
||||
}
|
||||
|
||||
// The oldest cache entry should now become at ~8 hours old, because this entry is older
|
||||
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
|
||||
|
||||
tx, _ = store.db.Begin()
|
||||
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
|
||||
_ = tx.Commit()
|
||||
if oldest.Truncate(time.Hour) != 8*time.Hour {
|
||||
t.Errorf("oldest endpoint uptime entry should've been ~8 hours old, was %s", oldest)
|
||||
}
|
||||
|
||||
// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
|
||||
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold - time.Hour)), Success: true})
|
||||
|
||||
tx, _ = store.db.Begin()
|
||||
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
|
||||
_ = tx.Commit()
|
||||
if oldest.Truncate(time.Hour) != uptimeAgeCleanUpThreshold-time.Hour {
|
||||
t.Errorf("oldest endpoint uptime entry should've been ~%s hours old, was %s", uptimeAgeCleanUpThreshold-time.Hour, oldest)
|
||||
}
|
||||
|
||||
// Since this entry is after the uptimeAgeCleanUpThreshold, both this entry as well as the previous
|
||||
// one should be deleted since they both surpass uptimeRetention
|
||||
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold + time.Hour)), Success: true})
|
||||
|
||||
tx, _ = store.db.Begin()
|
||||
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
|
||||
_ = tx.Commit()
|
||||
if oldest.Truncate(time.Hour) != 8*time.Hour {
|
||||
t.Errorf("oldest endpoint uptime entry should've been ~8 hours old, was %s", oldest)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *testing.T) {
|
||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly.db", false)
|
||||
defer store.Close()
|
||||
now := time.Now().Truncate(time.Hour)
|
||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||
|
||||
scenarios := []struct {
|
||||
numberOfHours int
|
||||
expectedMaxUptimeEntries int64
|
||||
}{
|
||||
{numberOfHours: 1, expectedMaxUptimeEntries: 1},
|
||||
{numberOfHours: 10, expectedMaxUptimeEntries: 10},
|
||||
{numberOfHours: 50, expectedMaxUptimeEntries: 50},
|
||||
{numberOfHours: 75, expectedMaxUptimeEntries: 75},
|
||||
{numberOfHours: 99, expectedMaxUptimeEntries: 99},
|
||||
{numberOfHours: 150, expectedMaxUptimeEntries: 100},
|
||||
{numberOfHours: 300, expectedMaxUptimeEntries: 100},
|
||||
{numberOfHours: 768, expectedMaxUptimeEntries: 100}, // 32 days (in hours), which means anything beyond that won't be persisted anyway
|
||||
{numberOfHours: 1000, expectedMaxUptimeEntries: 100},
|
||||
}
|
||||
// Note that is not technically an accurate real world representation, because uptime entries are always added in
|
||||
// the present, while this test is inserting results from the past to simulate long term uptime entries.
|
||||
// Since we want to test the behavior and not the test itself, this is a "best effort" approach.
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(fmt.Sprintf("num-hours-%d-expected-max-entries-%d", scenario.numberOfHours, scenario.expectedMaxUptimeEntries), func(t *testing.T) {
|
||||
for i := scenario.numberOfHours; i > 0; i-- {
|
||||
//fmt.Printf("i: %d (%s)\n", i, now.Add(-time.Duration(i)*time.Hour))
|
||||
// Create an uptime entry
|
||||
err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-time.Duration(i) * time.Hour), Success: true})
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
//// DEBUGGING: check number of uptime entries for endpoint
|
||||
//tx, _ := store.db.Begin()
|
||||
//numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
|
||||
//if err != nil {
|
||||
// t.Log(err)
|
||||
//}
|
||||
//_ = tx.Commit()
|
||||
//t.Logf("i=%d; numberOfHours=%d; There are currently %d uptime entries for endpointID=%d", i, scenario.numberOfHours, numberOfUptimeEntriesForEndpoint, 1)
|
||||
}
|
||||
// check number of uptime entries for endpoint
|
||||
tx, _ := store.db.Begin()
|
||||
numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
_ = tx.Commit()
|
||||
//t.Logf("numberOfHours=%d; There are currently %d uptime entries for endpointID=%d", scenario.numberOfHours, numberOfUptimeEntriesForEndpoint, 1)
|
||||
if scenario.expectedMaxUptimeEntries < numberOfUptimeEntriesForEndpoint {
|
||||
t.Errorf("expected %d (uptime entries) to be smaller than %d", numberOfUptimeEntriesForEndpoint, scenario.expectedMaxUptimeEntries)
|
||||
}
|
||||
store.Clear()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_getEndpointUptime(t *testing.T) {
|
||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false)
|
||||
defer store.Clear()
|
||||
defer store.Close()
|
||||
// Add 768 hourly entries (32 days)
|
||||
// Daily entries should be merged from hourly entries automatically
|
||||
for i := 768; i > 0; i-- {
|
||||
err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now().Add(-time.Duration(i) * time.Hour), Duration: time.Second, Success: true})
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
}
|
||||
// Check the number of uptime entries
|
||||
tx, _ := store.db.Begin()
|
||||
numberOfUptimeEntriesForEndpoint, err := store.getNumberOfUptimeEntriesByEndpointID(tx, 1)
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
if numberOfUptimeEntriesForEndpoint < 20 || numberOfUptimeEntriesForEndpoint > 200 {
|
||||
t.Errorf("expected number of uptime entries to be between 20 and 200, got %d", numberOfUptimeEntriesForEndpoint)
|
||||
}
|
||||
// Retrieve uptime for the past 30d
|
||||
uptime, avgResponseTime, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
_ = tx.Commit()
|
||||
if avgResponseTime != time.Second {
|
||||
t.Errorf("expected average response time to be %s, got %s", time.Second, avgResponseTime)
|
||||
}
|
||||
if uptime != 1 {
|
||||
t.Errorf("expected uptime to be 1, got %f", uptime)
|
||||
}
|
||||
// Add a new unsuccessful result, which should impact the uptime
|
||||
err = store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now(), Duration: time.Second, Success: false})
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
// Retrieve uptime for the past 30d
|
||||
tx, _ = store.db.Begin()
|
||||
uptime, _, err = store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now())
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
_ = tx.Commit()
|
||||
if uptime == 1 {
|
||||
t.Errorf("expected uptime to be less than 1, got %f", uptime)
|
||||
}
|
||||
// Retrieve uptime for the past 30d, but excluding the last 24h
|
||||
// This is not a real use case as there is no way for users to exclude the last 24h, but this is a great way
|
||||
// to ensure that hourly merging works as intended
|
||||
tx, _ = store.db.Begin()
|
||||
uptimeExcludingLast24h, _, err := store.getEndpointUptime(tx, 1, time.Now().Add(-(30 * 24 * time.Hour)), time.Now().Add(-24*time.Hour))
|
||||
if err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
_ = tx.Commit()
|
||||
if uptimeExcludingLast24h == uptime {
|
||||
t.Error("expected uptimeExcludingLast24h to to be different from uptime, got")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
|
||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertCleansUpEventsAndResultsProperly.db", false)
|
||||
defer store.Clear()
|
||||
defer store.Close()
|
||||
for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
|
||||
store.Insert(&testEndpoint, &testSuccessfulResult)
|
||||
store.Insert(&testEndpoint, &testUnsuccessfulResult)
|
||||
ss, _ := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults*5).WithEvents(1, common.MaximumNumberOfEvents*5))
|
||||
if len(ss.Results) > resultsCleanUpThreshold+1 {
|
||||
t.Errorf("number of results shouldn't have exceeded %d, reached %d", resultsCleanUpThreshold, len(ss.Results))
|
||||
}
|
||||
if len(ss.Events) > eventsCleanUpThreshold+1 {
|
||||
t.Errorf("number of events shouldn't have exceeded %d, reached %d", eventsCleanUpThreshold, len(ss.Events))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_InsertWithCaching(t *testing.T) {
|
||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true)
|
||||
defer store.Close()
|
||||
// Add 2 results
|
||||
store.Insert(&testEndpoint, &testSuccessfulResult)
|
||||
store.Insert(&testEndpoint, &testSuccessfulResult)
|
||||
// Verify that they exist
|
||||
endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
|
||||
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
|
||||
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
|
||||
}
|
||||
if len(endpointStatuses[0].Results) != 2 {
|
||||
t.Fatalf("expected 2 results, got %d", len(endpointStatuses[0].Results))
|
||||
}
|
||||
// Add 2 more results
|
||||
store.Insert(&testEndpoint, &testUnsuccessfulResult)
|
||||
store.Insert(&testEndpoint, &testUnsuccessfulResult)
|
||||
// Verify that they exist
|
||||
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
|
||||
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
|
||||
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
|
||||
}
|
||||
if len(endpointStatuses[0].Results) != 4 {
|
||||
t.Fatalf("expected 4 results, got %d", len(endpointStatuses[0].Results))
|
||||
}
|
||||
// Clear the store, which should also clear the cache
|
||||
store.Clear()
|
||||
// Verify that they no longer exist
|
||||
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
|
||||
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 0 {
|
||||
t.Fatalf("expected 0 EndpointStatus, got %d", numberOfEndpointStatuses)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStore_Persistence(t *testing.T) {
path := t.TempDir() + "/TestStore_Persistence.db"
store, _ := NewStore("sqlite", path, false)
store.Insert(&testEndpoint, &testSuccessfulResult)
store.Insert(&testEndpoint, &testUnsuccessfulResult)
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 1h should've been 0.5, got %f", uptime)
}
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 24h should've been 0.5, got %f", uptime)
}
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24*7), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 7d should've been 0.5, got %f", uptime)
}
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour*24*30), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 30d should've been 0.5, got %f", uptime)
}
ssFromOldStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents))
if ssFromOldStore == nil || ssFromOldStore.Group != "group" || ssFromOldStore.Name != "name" || len(ssFromOldStore.Events) != 3 || len(ssFromOldStore.Results) != 2 {
store.Close()
t.Fatal("sanity check failed")
}
store.Close()
store, _ = NewStore("sqlite", path, false)
defer store.Close()
ssFromNewStore, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, common.MaximumNumberOfResults).WithEvents(1, common.MaximumNumberOfEvents))
if ssFromNewStore == nil || ssFromNewStore.Group != "group" || ssFromNewStore.Name != "name" || len(ssFromNewStore.Events) != 3 || len(ssFromNewStore.Results) != 2 {
t.Fatal("failed sanity check")
}
if ssFromNewStore == ssFromOldStore {
t.Fatal("ss from the old and new store should have a different memory address")
}
for i := range ssFromNewStore.Events {
if ssFromNewStore.Events[i].Timestamp != ssFromOldStore.Events[i].Timestamp {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Events[i].Type != ssFromOldStore.Events[i].Type {
t.Error("new and old should've been the same")
}
}
for i := range ssFromOldStore.Results {
if ssFromNewStore.Results[i].Timestamp != ssFromOldStore.Results[i].Timestamp {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].Success != ssFromOldStore.Results[i].Success {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].Connected != ssFromOldStore.Results[i].Connected {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].IP != ssFromOldStore.Results[i].IP {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].Hostname != ssFromOldStore.Results[i].Hostname {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].HTTPStatus != ssFromOldStore.Results[i].HTTPStatus {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].DNSRCode != ssFromOldStore.Results[i].DNSRCode {
t.Error("new and old should've been the same")
}
if len(ssFromNewStore.Results[i].Errors) != len(ssFromOldStore.Results[i].Errors) {
t.Error("new and old should've been the same")
} else {
for j := range ssFromOldStore.Results[i].Errors {
if ssFromNewStore.Results[i].Errors[j] != ssFromOldStore.Results[i].Errors[j] {
t.Error("new and old should've been the same")
}
}
}
if len(ssFromNewStore.Results[i].ConditionResults) != len(ssFromOldStore.Results[i].ConditionResults) {
t.Error("new and old should've been the same")
} else {
for j := range ssFromOldStore.Results[i].ConditionResults {
if ssFromNewStore.Results[i].ConditionResults[j].Condition != ssFromOldStore.Results[i].ConditionResults[j].Condition {
t.Error("new and old should've been the same")
}
if ssFromNewStore.Results[i].ConditionResults[j].Success != ssFromOldStore.Results[i].ConditionResults[j].Success {
t.Error("new and old should've been the same")
}
}
}
}
}

func TestStore_Save(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_Save.db", false)
defer store.Close()
if store.Save() != nil {
t.Error("Save shouldn't do anything for this store")
}
}

// Note that there are much more extensive tests in /storage/store/store_test.go.
// This test is simply an extra sanity check
func TestStore_SanityCheck(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_SanityCheck.db", false)
defer store.Close()
store.Insert(&testEndpoint, &testSuccessfulResult)
endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
}
store.Insert(&testEndpoint, &testUnsuccessfulResult)
// Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
}
if hourlyAverageResponseTime, err := store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); err != nil {
t.Errorf("expected no error, got %v", err)
} else if len(hourlyAverageResponseTime) != 1 {
t.Errorf("expected 1 hour to have had a result in the past 24 hours, got %d", len(hourlyAverageResponseTime))
}
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); uptime != 0.5 {
t.Errorf("expected uptime of last 24h to be 0.5, got %f", uptime)
}
if averageResponseTime, _ := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); averageResponseTime != 450 {
t.Errorf("expected average response time of last 24h to be 450, got %d", averageResponseTime)
}
ss, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 20))
if ss == nil {
t.Fatalf("Store should've had key '%s', but didn't", testEndpoint.Key())
}
if len(ss.Events) != 3 {
t.Errorf("Endpoint '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
}
if len(ss.Results) != 2 {
t.Errorf("Endpoint '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
}
if deleted := store.DeleteAllEndpointStatusesNotInKeys([]string{"invalid-key-which-means-everything-should-get-deleted"}); deleted != 1 {
t.Errorf("%d entries should've been deleted, got %d", 1, deleted)
}
if deleted := store.DeleteAllEndpointStatusesNotInKeys([]string{}); deleted != 0 {
t.Errorf("There should've been no entries left to delete, got %d", deleted)
}
}

// TestStore_InvalidTransaction tests what happens if an invalid transaction is passed as a parameter
func TestStore_InvalidTransaction(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InvalidTransaction.db", false)
defer store.Close()
tx, _ := store.db.Begin()
tx.Commit()
if _, err := store.insertEndpoint(tx, &testEndpoint); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.insertEndpointEvent(tx, 1, endpoint.NewEventFromResult(&testSuccessfulResult)); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.insertEndpointResult(tx, 1, &testSuccessfulResult); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.insertConditionResults(tx, 1, testSuccessfulResult.ConditionResults); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.updateEndpointUptime(tx, 1, &testSuccessfulResult); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getAllEndpointKeys(tx); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getEndpointStatusByKey(tx, testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, 20)); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getEndpointEventsByEndpointID(tx, 1, 1, 50); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getEndpointResultsByEndpointID(tx, 1, 1, 50); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.deleteOldEndpointEvents(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.deleteOldEndpointResults(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, _, err := store.getEndpointUptime(tx, 1, time.Now(), time.Now()); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getEndpointID(tx, &testEndpoint); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getNumberOfEventsByEndpointID(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getNumberOfResultsByEndpointID(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getAgeOfOldestEndpointUptimeEntry(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if _, err := store.getLastEndpointResultSuccessValue(tx, 1); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
}

func TestStore_NoRows(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_NoRows.db", false)
defer store.Close()
tx, _ := store.db.Begin()
defer tx.Rollback()
if _, err := store.getLastEndpointResultSuccessValue(tx, 1); !errors.Is(err, errNoRowsReturned) {
t.Errorf("should've returned %v, got %v", errNoRowsReturned, err)
}
if _, err := store.getAgeOfOldestEndpointUptimeEntry(tx, 1); !errors.Is(err, errNoRowsReturned) {
t.Errorf("should've returned %v, got %v", errNoRowsReturned, err)
}
}

// This tests very unlikely cases where a table is deleted.
func TestStore_BrokenSchema(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_BrokenSchema.db", false)
defer store.Close()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams()); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoints")
// And now we'll try to insert something in our broken schema
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams()); err == nil {
t.Fatal("expected an error")
}
// Repair
if err := store.createSchema(); err != nil {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_events")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fail, got", err.Error())
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 1).WithEvents(1, 1)); err != nil {
t.Fatal("expected no error, because this should silently fail, got", err.Error())
}
// Repair
if err := store.createSchema(); err != nil {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_results")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 1).WithEvents(1, 1)); err != nil {
t.Fatal("expected no error, because this should silently fail, got", err.Error())
}
// Repair
if err := store.createSchema(); err != nil {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_result_conditions")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
// Repair
if err := store.createSchema(); err != nil {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_uptimes")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fail, got", err.Error())
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
t.Fatal("expected an error")
}
}

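// Based on the scenarios below, the cache key format appears to be
// "<endpointKey>-<EventsPage>-<EventsPageSize>-<ResultsPage>-<ResultsPageSize>"
// (e.g. "simple-1-2-3-4"), and generateCacheKey/extractKeyAndParamsFromCacheKey
// are expected to round-trip that format.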
func TestCacheKey(t *testing.T) {
scenarios := []struct {
endpointKey string
params paging.EndpointStatusParams
overrideCacheKey string
expectedCacheKey string
wantErr bool
}{
{
endpointKey: "simple",
params: paging.EndpointStatusParams{EventsPage: 1, EventsPageSize: 2, ResultsPage: 3, ResultsPageSize: 4},
expectedCacheKey: "simple-1-2-3-4",
wantErr: false,
},
{
endpointKey: "with-hyphen",
params: paging.EndpointStatusParams{EventsPage: 0, EventsPageSize: 0, ResultsPage: 1, ResultsPageSize: 20},
expectedCacheKey: "with-hyphen-0-0-1-20",
wantErr: false,
},
{
endpointKey: "with-multiple-hyphens",
params: paging.EndpointStatusParams{EventsPage: 0, EventsPageSize: 0, ResultsPage: 2, ResultsPageSize: 20},
expectedCacheKey: "with-multiple-hyphens-0-0-2-20",
wantErr: false,
},
{
overrideCacheKey: "invalid-a-2-3-4",
wantErr: true,
},
{
overrideCacheKey: "invalid-1-a-3-4",
wantErr: true,
},
{
overrideCacheKey: "invalid-1-2-a-4",
wantErr: true,
},
{
overrideCacheKey: "invalid-1-2-3-a",
wantErr: true,
},
{
overrideCacheKey: "notenoughhyphen1-2-3-4",
wantErr: true,
},
}
for _, scenario := range scenarios {
t.Run(scenario.expectedCacheKey+scenario.overrideCacheKey, func(t *testing.T) {
var cacheKey string
if len(scenario.overrideCacheKey) > 0 {
cacheKey = scenario.overrideCacheKey
} else {
cacheKey = generateCacheKey(scenario.endpointKey, &scenario.params)
if cacheKey != scenario.expectedCacheKey {
t.Errorf("expected %s, got %s", scenario.expectedCacheKey, cacheKey)
}
}
extractedEndpointKey, extractedParams, err := extractKeyAndParamsFromCacheKey(cacheKey)
if (err != nil) != scenario.wantErr {
t.Errorf("expected error %v, got %v", scenario.wantErr, err)
return
}
if err != nil {
// If there's an error, we don't need to check the extracted values
return
}
if extractedEndpointKey != scenario.endpointKey {
t.Errorf("expected endpointKey %s, got %s", scenario.endpointKey, extractedEndpointKey)
}
if extractedParams.EventsPage != scenario.params.EventsPage {
t.Errorf("expected EventsPage %d, got %d", scenario.params.EventsPage, extractedParams.EventsPage)
}
if extractedParams.EventsPageSize != scenario.params.EventsPageSize {
t.Errorf("expected EventsPageSize %d, got %d", scenario.params.EventsPageSize, extractedParams.EventsPageSize)
}
if extractedParams.ResultsPage != scenario.params.ResultsPage {
t.Errorf("expected ResultsPage %d, got %d", scenario.params.ResultsPage, extractedParams.ResultsPage)
}
if extractedParams.ResultsPageSize != scenario.params.ResultsPageSize {
t.Errorf("expected ResultsPageSize %d, got %d", scenario.params.ResultsPageSize, extractedParams.ResultsPageSize)
}
})
}
}

func TestTriggeredEndpointAlertsPersistence(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestTriggeredEndpointAlertsPersistence.db", false)
defer store.Close()
yes, desc := true, "description"
ep := testEndpoint
ep.NumberOfSuccessesInARow = 0
alrt := &alert.Alert{
Type: alert.TypePagerDuty,
Enabled: &yes,
FailureThreshold: 4,
SuccessThreshold: 2,
Description: &desc,
SendOnResolved: &yes,
Triggered: true,
ResolveKey: "1234567",
}
// Alert just triggered, so NumberOfSuccessesInARow is 0
if err := store.UpsertTriggeredEndpointAlert(&ep, alrt); err != nil {
t.Fatal("expected no error, got", err.Error())
}
exists, resolveKey, numberOfSuccessesInARow, err := store.GetTriggeredEndpointAlert(&ep, alrt)
if err != nil {
t.Fatal("expected no error, got", err.Error())
}
if !exists {
t.Error("expected triggered alert to exist")
}
if resolveKey != alrt.ResolveKey {
t.Errorf("expected resolveKey %s, got %s", alrt.ResolveKey, resolveKey)
}
if numberOfSuccessesInARow != ep.NumberOfSuccessesInARow {
t.Errorf("expected persisted NumberOfSuccessesInARow to be %d, got %d", ep.NumberOfSuccessesInARow, numberOfSuccessesInARow)
}
// Endpoint just had a successful evaluation, so NumberOfSuccessesInARow is now 1
ep.NumberOfSuccessesInARow++
if err := store.UpsertTriggeredEndpointAlert(&ep, alrt); err != nil {
t.Fatal("expected no error, got", err.Error())
}
exists, resolveKey, numberOfSuccessesInARow, err = store.GetTriggeredEndpointAlert(&ep, alrt)
if err != nil {
t.Error("expected no error, got", err.Error())
}
if !exists {
t.Error("expected triggered alert to exist")
}
if resolveKey != alrt.ResolveKey {
t.Errorf("expected resolveKey %s, got %s", alrt.ResolveKey, resolveKey)
}
if numberOfSuccessesInARow != ep.NumberOfSuccessesInARow {
t.Errorf("expected persisted NumberOfSuccessesInARow to be %d, got %d", ep.NumberOfSuccessesInARow, numberOfSuccessesInARow)
}
// Simulate the endpoint having another successful evaluation, which means the alert is now resolved,
// and we should delete the triggered alert from the store
ep.NumberOfSuccessesInARow++
if err := store.DeleteTriggeredEndpointAlert(&ep, alrt); err != nil {
t.Fatal("expected no error, got", err.Error())
}
exists, _, _, err = store.GetTriggeredEndpointAlert(&ep, alrt)
if err != nil {
t.Error("expected no error, got", err.Error())
}
if exists {
t.Error("expected triggered alert to no longer exist as it has been deleted")
}
}

func TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint.db", false)
defer store.Close()
yes, desc := true, "description"
ep1 := testEndpoint
ep1.Name = "ep1"
ep2 := testEndpoint
ep2.Name = "ep2"
alert1 := alert.Alert{
Type: alert.TypePagerDuty,
Enabled: &yes,
FailureThreshold: 4,
SuccessThreshold: 2,
Description: &desc,
SendOnResolved: &yes,
Triggered: true,
ResolveKey: "1234567",
}
alert2 := alert1
alert2.Type, alert2.ResolveKey = alert.TypeSlack, ""
alert3 := alert2
if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert1); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if err := store.UpsertTriggeredEndpointAlert(&ep1, &alert2); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if err := store.UpsertTriggeredEndpointAlert(&ep2, &alert3); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); !exists {
t.Error("expected alert1 to exist for ep1")
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
t.Error("expected alert2 to exist for ep1")
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
t.Error("expected alert3 to exist for ep2")
}
// Now we simulate the alert configuration being updated, and the alert being resolved
if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{alert2.Checksum()}); deleted != 1 {
t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert1); exists {
t.Error("expected alert1 to have been deleted")
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep1, &alert2); !exists {
t.Error("expected alert2 to exist for ep1")
}
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
t.Error("expected alert3 to exist for ep2")
}
// Now let's just assume all alerts for ep1 were removed
if deleted := store.DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(&ep1, []string{}); deleted != 1 {
t.Errorf("expected 1 triggered alert to be deleted, got %d", deleted)
}
// Make sure the alert for ep2 still exists
if exists, _, _, _ := store.GetTriggeredEndpointAlert(&ep2, &alert3); !exists {
t.Error("expected alert3 to exist for ep2")
}
}

150
storage/store/store.go
Normal file
@ -0,0 +1,150 @@
package store

import (
"context"
"time"

"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
"github.com/TwiN/gatus/v5/storage/store/sql"
"github.com/TwiN/logr"
)

// Store is the interface that each store should implement
type Store interface {
// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error)

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error)

// GetEndpointStatusByKey returns the endpoint status for a given key
GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error)

// GetUptimeByKey returns the uptime percentage during a time range
GetUptimeByKey(key string, from, to time.Time) (float64, error)

// GetAverageResponseTimeByKey returns the average response time in milliseconds (value) during a time range
GetAverageResponseTimeByKey(key string, from, to time.Time) (int, error)

// GetHourlyAverageResponseTimeByKey returns a map of hourly (key) average response time in milliseconds (value) during a time range
GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error)

// Insert adds the observed result for the specified endpoint into the store
Insert(ep *endpoint.Endpoint, result *endpoint.Result) error

// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
//
// Used to delete endpoints that have been persisted but are no longer part of the configured endpoints
DeleteAllEndpointStatusesNotInKeys(keys []string) int

// GetTriggeredEndpointAlert returns whether the given alert is currently triggered for the specified endpoint,
// as well as the necessary information to resolve it
GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error)

// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
// Used for persistence of triggered alerts across application restarts
UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error

// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error

// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
// configurations are not provided in the checksums list.
// This prevents triggered alerts that have been removed or modified from lingering in the database.
DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int

// Clear deletes everything from the store
Clear()

// Save persists the data if and where it needs to be persisted
Save() error

// Close terminates every connection and closes the store, if applicable.
// Should only be used before stopping the application.
Close()
}

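// A minimal usage sketch of this interface (not from the original source; the
// SQLite path and page sizes below are illustrative assumptions):
//
//	_ = Initialize(&storage.Config{Type: storage.TypeSQLite, Path: "/data/data.db"})
//	s := Get()
//	statuses, err := s.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
//	if err != nil {
//		// handle the error
//	}
//	_ = statuses
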
// TODO: add method to check state of store (by keeping track of silent errors)

var (
// Validate interface implementation on compile
_ Store = (*memory.Store)(nil)
_ Store = (*sql.Store)(nil)
)

var (
store Store

// initialized keeps track of whether the storage provider was initialized
// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
// every single time Get is called, we'll just lazily keep track of its existence through this variable
initialized bool

ctx context.Context
cancelFunc context.CancelFunc
)

func Get() Store {
if !initialized {
// This only happens in tests
logr.Info("[store.Get] Provider requested before it was initialized, automatically initializing")
err := Initialize(nil)
if err != nil {
panic("failed to automatically initialize store: " + err.Error())
}
}
return store
}

// Initialize instantiates the storage provider based on the given storage configuration
func Initialize(cfg *storage.Config) error {
initialized = true
var err error
if cancelFunc != nil {
// Stop the active autoSave task, if there's already one
cancelFunc()
}
if cfg == nil {
// This only happens in tests
logr.Warn("[store.Initialize] nil storage config passed as parameter. This should only happen in tests. Defaulting to an empty config.")
cfg = &storage.Config{}
}
if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres {
logr.Infof("[store.Initialize] Creating storage provider of type=%s", cfg.Type)
}
ctx, cancelFunc = context.WithCancel(context.Background())
switch cfg.Type {
case storage.TypeSQLite, storage.TypePostgres:
store, err = sql.NewStore(string(cfg.Type), cfg.Path, cfg.Caching)
if err != nil {
return err
}
case storage.TypeMemory:
fallthrough
default:
store, _ = memory.NewStore()
}
return nil
}

// autoSave automatically calls the Save function of the provider at every interval
func autoSave(ctx context.Context, store Store, interval time.Duration) {
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
logr.Info("[store.autoSave] Stopping active job")
return
case <-ticker.C:
logr.Info("[store.autoSave] Saving")
if err := store.Save(); err != nil {
logr.Errorf("[store.autoSave] Save failed: %s", err.Error())
}
}
}
}
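
// autoSave is expected to be started as a goroutine by whichever provider needs
// periodic persistence; a hedged sketch of the wiring (the 7-minute interval is
// an illustrative assumption, not from the original source):
//
//	go autoSave(ctx, store, 7*time.Minute)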
213
storage/store/store_bench_test.go
Normal file
@ -0,0 +1,213 @@
package store

import (
"strconv"
"testing"
"time"

"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
"github.com/TwiN/gatus/v5/storage/store/sql"
)

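// These benchmarks can be run with the standard Go tooling, e.g.:
//
//	go test -bench=BenchmarkStore_ -benchmem ./storage/store/
//
// (the package path is inferred from the file headers above)
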
func BenchmarkStore_GetAllEndpointStatuses(b *testing.B) {
memoryStore, err := memory.NewStore()
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetAllEndpointStatuses.db", false)
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
defer sqliteStore.Close()
type Scenario struct {
Name string
Store Store
Parallel bool
}
scenarios := []Scenario{
{
Name: "memory",
Store: memoryStore,
Parallel: false,
},
{
Name: "memory-parallel",
Store: memoryStore,
Parallel: true,
},
{
Name: "sqlite",
Store: sqliteStore,
Parallel: false,
},
{
Name: "sqlite-parallel",
Store: sqliteStore,
Parallel: true,
},
}
for _, scenario := range scenarios {
numberOfEndpoints := []int{10, 25, 50, 100}
for _, numberOfEndpointsToCreate := range numberOfEndpoints {
// Create endpoints and insert results
for i := 0; i < numberOfEndpointsToCreate; i++ {
ep := testEndpoint
ep.Name = "endpoint" + strconv.Itoa(i)
// Insert 20 results for each endpoint
for j := 0; j < 20; j++ {
scenario.Store.Insert(&ep, &testSuccessfulResult)
}
}
// Run the scenarios
b.Run(scenario.Name+"-with-"+strconv.Itoa(numberOfEndpointsToCreate)+"-endpoints", func(b *testing.B) {
if scenario.Parallel {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, _ = scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
}
})
} else {
for n := 0; n < b.N; n++ {
_, _ = scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
}
}
b.ReportAllocs()
})
scenario.Store.Clear()
}
}
}

func BenchmarkStore_Insert(b *testing.B) {
memoryStore, err := memory.NewStore()
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_Insert.db", false)
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
defer sqliteStore.Close()
type Scenario struct {
Name string
Store Store
Parallel bool
}
scenarios := []Scenario{
{
Name: "memory",
Store: memoryStore,
Parallel: false,
},
{
Name: "memory-parallel",
Store: memoryStore,
Parallel: true,
},
{
Name: "sqlite",
Store: sqliteStore,
Parallel: false,
},
{
Name: "sqlite-parallel",
Store: sqliteStore,
Parallel: false, // Presumably intentional despite the scenario name: concurrent SQLite writes contend for the database lock
},
}
for _, scenario := range scenarios {
b.Run(scenario.Name, func(b *testing.B) {
if scenario.Parallel {
b.RunParallel(func(pb *testing.PB) {
n := 0
for pb.Next() {
var result endpoint.Result
if n%10 == 0 {
result = testUnsuccessfulResult
} else {
result = testSuccessfulResult
}
result.Timestamp = time.Now()
scenario.Store.Insert(&testEndpoint, &result)
n++
}
})
} else {
for n := 0; n < b.N; n++ {
var result endpoint.Result
if n%10 == 0 {
result = testUnsuccessfulResult
} else {
result = testSuccessfulResult
}
result.Timestamp = time.Now()
scenario.Store.Insert(&testEndpoint, &result)
}
}
b.ReportAllocs()
scenario.Store.Clear()
})
}
}

func BenchmarkStore_GetEndpointStatusByKey(b *testing.B) {
memoryStore, err := memory.NewStore()
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
sqliteStore, err := sql.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetEndpointStatusByKey.db", false)
if err != nil {
b.Fatal("failed to create store:", err.Error())
}
defer sqliteStore.Close()
type Scenario struct {
Name string
Store Store
Parallel bool
}
scenarios := []Scenario{
{
Name: "memory",
Store: memoryStore,
Parallel: false,
},
{
Name: "memory-parallel",
Store: memoryStore,
Parallel: true,
},
{
Name: "sqlite",
Store: sqliteStore,
Parallel: false,
},
{
Name: "sqlite-parallel",
Store: sqliteStore,
Parallel: true,
},
}
for _, scenario := range scenarios {
for i := 0; i < 50; i++ {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&testEndpoint, &testUnsuccessfulResult)
}
b.Run(scenario.Name, func(b *testing.B) {
if scenario.Parallel {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, 20))
}
})
} else {
for n := 0; n < b.N; n++ {
scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, 20))
}
}
b.ReportAllocs()
})
scenario.Store.Clear()
}
}
653
storage/store/store_test.go
Normal file
@ -0,0 +1,653 @@
package store

import (
"errors"
"path/filepath"
"testing"
"time"

"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
"github.com/TwiN/gatus/v5/storage/store/sql"
)

var (
firstCondition = endpoint.Condition("[STATUS] == 200")
secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
thirdCondition = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

now = time.Now().Truncate(time.Hour)

testEndpoint = endpoint.Endpoint{
Name: "name",
Group: "group",
URL: "https://example.org/what/ever",
Method: "GET",
Body: "body",
Interval: 30 * time.Second,
Conditions: []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
Alerts: nil,
NumberOfFailuresInARow: 0,
NumberOfSuccessesInARow: 0,
}
testSuccessfulResult = endpoint.Result{
Timestamp: now,
Success: true,
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
Errors: nil,
Connected: true,
Duration: 150 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
},
{
Condition: "[RESPONSE_TIME] < 500",
Success: true,
},
{
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
Success: true,
},
},
}
testUnsuccessfulResult = endpoint.Result{
Timestamp: now,
Success: false,
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
Errors: []string{"error-1", "error-2"},
Connected: true,
Duration: 750 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
},
{
Condition: "[RESPONSE_TIME] < 500",
Success: false,
},
{
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
Success: false,
},
},
}
)

type Scenario struct {
Name string
Store Store
}

func initStoresAndBaseScenarios(t *testing.T, testName string) []*Scenario {
memoryStore, err := memory.NewStore()
if err != nil {
t.Fatal("failed to create store:", err.Error())
}
sqliteStore, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+".db", false)
if err != nil {
t.Fatal("failed to create store:", err.Error())
}
sqliteStoreWithCaching, err := sql.NewStore("sqlite", t.TempDir()+"/"+testName+"-with-caching.db", true)
if err != nil {
t.Fatal("failed to create store:", err.Error())
}
return []*Scenario{
{
Name: "memory",
Store: memoryStore,
},
{
Name: "sqlite",
Store: sqliteStore,
},
{
Name: "sqlite-with-caching",
Store: sqliteStoreWithCaching,
},
}
}

func cleanUp(scenarios []*Scenario) {
for _, scenario := range scenarios {
scenario.Store.Close()
}
}

func TestStore_GetEndpointStatusByKey(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetEndpointStatusByKey")
defer cleanUp(scenarios)
firstResult := testSuccessfulResult
firstResult.Timestamp = now.Add(-time.Minute)
secondResult := testUnsuccessfulResult
secondResult.Timestamp = now
thirdResult := testSuccessfulResult
thirdResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
endpointStatus, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if err != nil {
t.Fatal("shouldn't have returned an error, got", err.Error())
}
if endpointStatus == nil {
t.Fatalf("endpointStatus shouldn't have been nil")
}
if endpointStatus.Name != testEndpoint.Name {
t.Fatalf("endpointStatus.Name should've been %s, got %s", testEndpoint.Name, endpointStatus.Name)
}
if endpointStatus.Group != testEndpoint.Group {
t.Fatalf("endpointStatus.Group should've been %s, got %s", testEndpoint.Group, endpointStatus.Group)
}
if len(endpointStatus.Results) != 2 {
t.Fatalf("endpointStatus.Results should've had 2 entries")
}
if endpointStatus.Results[0].Timestamp.After(endpointStatus.Results[1].Timestamp) {
t.Error("The result at index 0 should've been older than the result at index 1")
}
scenario.Store.Insert(&testEndpoint, &thirdResult)
endpointStatus, err = scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if err != nil {
t.Fatal("shouldn't have returned an error, got", err.Error())
}
if len(endpointStatus.Results) != 3 {
t.Fatalf("endpointStatus.Results should've had 3 entries")
}
scenario.Store.Clear()
})
}
}

func TestStore_GetEndpointStatusForMissingStatusReturnsNil(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetEndpointStatusForMissingStatusReturnsNil")
defer cleanUp(scenarios)
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
endpointStatus, err := scenario.Store.GetEndpointStatus("nonexistantgroup", "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
t.Errorf("returned endpoint status for group '%s' and name '%s' should've been nil", "nonexistantgroup", "nonexistantname")
}
endpointStatus, err = scenario.Store.GetEndpointStatus(testEndpoint.Group, "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
t.Errorf("returned endpoint status for group '%s' and name '%s' should've been nil", testEndpoint.Group, "nonexistantname")
}
endpointStatus, err = scenario.Store.GetEndpointStatus("nonexistantgroup", testEndpoint.Name, paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
t.Errorf("returned endpoint status for group '%s' and name '%s' should've been nil", "nonexistantgroup", testEndpoint.Name)
}
})
}
}

func TestStore_GetAllEndpointStatuses(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetAllEndpointStatuses")
defer cleanUp(scenarios)
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&testEndpoint, &testUnsuccessfulResult)
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
}
if len(endpointStatuses) != 1 {
t.Fatal("expected 1 endpoint status")
}
actual := endpointStatuses[0]
if actual == nil {
t.Fatal("expected endpoint status to exist")
}
if len(actual.Results) != 2 {
t.Error("expected 2 results, got", len(actual.Results))
}
if len(actual.Events) != 0 {
t.Error("expected 0 events, got", len(actual.Events))
}
scenario.Store.Clear()
})
t.Run(scenario.Name+"-page-2", func(t *testing.T) {
otherEndpoint := testEndpoint
otherEndpoint.Name = testEndpoint.Name + "-other"
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(2, 2))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
}
if len(endpointStatuses) != 2 {
t.Fatal("expected 2 endpoint statuses")
}
if endpointStatuses[0] == nil || endpointStatuses[1] == nil {
t.Fatal("expected endpoint status to exist")
}
if len(endpointStatuses[0].Results) != 0 {
t.Error("expected 0 results on the first endpoint, got", len(endpointStatuses[0].Results))
}
if len(endpointStatuses[1].Results) != 1 {
t.Error("expected 1 result on the second endpoint, got", len(endpointStatuses[1].Results))
}
if len(endpointStatuses[0].Events) != 0 {
t.Error("expected 0 events on the first endpoint, got", len(endpointStatuses[0].Events))
}
if len(endpointStatuses[1].Events) != 0 {
t.Error("expected 0 events on the second endpoint, got", len(endpointStatuses[1].Events))
}
scenario.Store.Clear()
})
}
}

func TestStore_GetAllEndpointStatusesWithResultsAndEvents(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetAllEndpointStatusesWithResultsAndEvents")
defer cleanUp(scenarios)
firstResult := testSuccessfulResult
secondResult := testUnsuccessfulResult
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 50))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
}
if len(endpointStatuses) != 1 {
t.Fatal("expected 1 endpoint status")
}
actual := endpointStatuses[0]
if actual == nil {
t.Fatal("expected endpoint status to exist")
}
if len(actual.Results) != 2 {
t.Error("expected 2 results, got", len(actual.Results))
}
if len(actual.Events) != 3 {
t.Error("expected 3 events, got", len(actual.Events))
}
scenario.Store.Clear()
})
}
}

func TestStore_GetEndpointStatusPage1IsHasMoreRecentResultsThanPage2(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetEndpointStatusPage1IsHasMoreRecentResultsThanPage2")
defer cleanUp(scenarios)
firstResult := testSuccessfulResult
firstResult.Timestamp = now.Add(-time.Minute)
secondResult := testUnsuccessfulResult
secondResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
endpointStatusPage1, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, 1))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
}
if endpointStatusPage1 == nil {
t.Fatalf("endpointStatusPage1 shouldn't have been nil")
}
if len(endpointStatusPage1.Results) != 1 {
t.Fatalf("endpointStatusPage1 should've had 1 result")
}
endpointStatusPage2, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(2, 1))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
}
if endpointStatusPage2 == nil {
t.Fatalf("endpointStatusPage2 shouldn't have been nil")
}
if len(endpointStatusPage2.Results) != 1 {
t.Fatalf("endpointStatusPage2 should've had 1 result")
}
// Compare the timestamp of both pages
if !endpointStatusPage1.Results[0].Timestamp.After(endpointStatusPage2.Results[0].Timestamp) {
t.Errorf("The result from the first page should've been more recent than the results from the second page")
}
scenario.Store.Clear()
})
}
}

func TestStore_GetUptimeByKey(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetUptimeByKey")
defer cleanUp(scenarios)
firstResult := testSuccessfulResult
firstResult.Timestamp = now.Add(-time.Minute)
secondResult := testUnsuccessfulResult
secondResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
if _, err := scenario.Store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err != common.ErrEndpointNotFound {
t.Errorf("should've returned not found because there's nothing yet, got %v", err)
}
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
if uptime, _ := scenario.Store.GetUptimeByKey(testEndpoint.Key(), now.Add(-time.Hour), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 1h should've been 0.5, got %f", uptime)
}
if uptime, _ := scenario.Store.GetUptimeByKey(testEndpoint.Key(), now.Add(-time.Hour*24), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 24h should've been 0.5, got %f", uptime)
}
if uptime, _ := scenario.Store.GetUptimeByKey(testEndpoint.Key(), now.Add(-time.Hour*24*7), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 7d should've been 0.5, got %f", uptime)
}
if _, err := scenario.Store.GetUptimeByKey(testEndpoint.Key(), now, time.Now().Add(-time.Hour)); err == nil {
t.Error("should've returned an error because the parameter 'from' cannot be older than 'to'")
}
})
}
}

func TestStore_GetAverageResponseTimeByKey(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_GetAverageResponseTimeByKey")
defer cleanUp(scenarios)
firstResult := testSuccessfulResult
firstResult.Timestamp = now.Add(-(2 * time.Hour))
firstResult.Duration = 300 * time.Millisecond
secondResult := testSuccessfulResult
secondResult.Duration = 150 * time.Millisecond
secondResult.Timestamp = now.Add(-(1*time.Hour + 30*time.Minute))
thirdResult := testUnsuccessfulResult
thirdResult.Duration = 200 * time.Millisecond
thirdResult.Timestamp = now.Add(-(1 * time.Hour))
fourthResult := testSuccessfulResult
fourthResult.Duration = 500 * time.Millisecond
fourthResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.Insert(&testEndpoint, &thirdResult)
scenario.Store.Insert(&testEndpoint, &fourthResult)
if averageResponseTime, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-48*time.Hour), now.Add(-24*time.Hour)); err == nil {
if averageResponseTime != 0 {
t.Errorf("expected average response time to be 0ms, got %v", averageResponseTime)
}
} else {
t.Error("shouldn't have returned an error, got", err)
}
if averageResponseTime, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-24*time.Hour), now); err == nil {
if averageResponseTime != 287 {
t.Errorf("expected average response time to be 287ms, got %v", averageResponseTime)
}
} else {
t.Error("shouldn't have returned an error, got", err)
}
if averageResponseTime, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-time.Hour), now); err == nil {
if averageResponseTime != 350 {
t.Errorf("expected average response time to be 350ms, got %v", averageResponseTime)
}
} else {
t.Error("shouldn't have returned an error, got", err)
}
if averageResponseTime, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-2*time.Hour), now.Add(-time.Hour)); err == nil {
if averageResponseTime != 216 {
t.Errorf("expected average response time to be 216ms, got %v", averageResponseTime)
}
} else {
t.Error("shouldn't have returned an error, got", err)
}
if _, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now, now.Add(-2*time.Hour)); err == nil {
t.Error("expected an error because from > to, got nil")
}
scenario.Store.Clear()
})
}
}

func TestStore_GetHourlyAverageResponseTimeByKey(t *testing.T) {
	scenarios := initStoresAndBaseScenarios(t, "TestStore_GetHourlyAverageResponseTimeByKey")
	defer cleanUp(scenarios)
	firstResult := testSuccessfulResult
	firstResult.Timestamp = now.Add(-(2 * time.Hour))
	firstResult.Duration = 300 * time.Millisecond
	secondResult := testSuccessfulResult
	secondResult.Duration = 150 * time.Millisecond
	secondResult.Timestamp = now.Add(-(1*time.Hour + 30*time.Minute))
	thirdResult := testUnsuccessfulResult
	thirdResult.Duration = 200 * time.Millisecond
	thirdResult.Timestamp = now.Add(-(1 * time.Hour))
	fourthResult := testSuccessfulResult
	fourthResult.Duration = 500 * time.Millisecond
	fourthResult.Timestamp = now
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			scenario.Store.Insert(&testEndpoint, &firstResult)
			scenario.Store.Insert(&testEndpoint, &secondResult)
			scenario.Store.Insert(&testEndpoint, &thirdResult)
			scenario.Store.Insert(&testEndpoint, &fourthResult)
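			// The expectations below only hold if now is aligned to an hour boundary
			// (the test setup presumably truncates it): the first and second results then
			// share the now-2h bucket ((300+150)/2 = 225), the third lands in the now-1h
			// bucket (200), and the fourth in the current hour (500).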
			hourlyAverageResponseTime, err := scenario.Store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-24*time.Hour), now)
			if err != nil {
				t.Error("shouldn't have returned an error, got", err)
			}
			if key := now.Truncate(time.Hour).Unix(); hourlyAverageResponseTime[key] != 500 {
				t.Errorf("expected average response time to be 500ms at %d, got %v", key, hourlyAverageResponseTime[key])
			}
			if key := now.Truncate(time.Hour).Add(-time.Hour).Unix(); hourlyAverageResponseTime[key] != 200 {
				t.Errorf("expected average response time to be 200ms at %d, got %v", key, hourlyAverageResponseTime[key])
			}
			if key := now.Truncate(time.Hour).Add(-2 * time.Hour).Unix(); hourlyAverageResponseTime[key] != 225 {
				t.Errorf("expected average response time to be 225ms at %d, got %v", key, hourlyAverageResponseTime[key])
			}
			scenario.Store.Clear()
		})
	}
}

func TestStore_Insert(t *testing.T) {
	scenarios := initStoresAndBaseScenarios(t, "TestStore_Insert")
	defer cleanUp(scenarios)
	firstResult := testSuccessfulResult
	firstResult.Timestamp = now.Add(-time.Minute)
	secondResult := testUnsuccessfulResult
	secondResult.Timestamp = now
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			scenario.Store.Insert(&testEndpoint, &firstResult)
			scenario.Store.Insert(&testEndpoint, &secondResult)
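			// Requesting page 1 with the maximum page sizes should return every stored event and result in a single call.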
			ss, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
			if err != nil {
				t.Error("shouldn't have returned an error, got", err)
			}
			if ss == nil {
				t.Fatalf("Store should've had key '%s', but didn't", testEndpoint.Key())
			}
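			// Three events are expected: presumably the initial start event, a healthy event
			// from the first (successful) result, and an unhealthy event from the second (failed) result.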
			if len(ss.Events) != 3 {
				t.Fatalf("Endpoint '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
			}
			if len(ss.Results) != 2 {
				t.Fatalf("Endpoint '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
			}
			for i, expectedResult := range []endpoint.Result{firstResult, secondResult} {
				if expectedResult.HTTPStatus != ss.Results[i].HTTPStatus {
					t.Errorf("Result at index %d should've had an HTTPStatus of %d, got %d", i, expectedResult.HTTPStatus, ss.Results[i].HTTPStatus)
				}
				if expectedResult.DNSRCode != ss.Results[i].DNSRCode {
					t.Errorf("Result at index %d should've had a DNSRCode of %s, got %s", i, expectedResult.DNSRCode, ss.Results[i].DNSRCode)
				}
				if expectedResult.Hostname != ss.Results[i].Hostname {
					t.Errorf("Result at index %d should've had a Hostname of %s, got %s", i, expectedResult.Hostname, ss.Results[i].Hostname)
				}
				if expectedResult.IP != ss.Results[i].IP {
					t.Errorf("Result at index %d should've had an IP of %s, got %s", i, expectedResult.IP, ss.Results[i].IP)
				}
				if expectedResult.Connected != ss.Results[i].Connected {
					t.Errorf("Result at index %d should've had a Connected value of %t, got %t", i, expectedResult.Connected, ss.Results[i].Connected)
				}
				if expectedResult.Duration != ss.Results[i].Duration {
					t.Errorf("Result at index %d should've had a Duration of %s, got %s", i, expectedResult.Duration.String(), ss.Results[i].Duration.String())
				}
				if len(expectedResult.Errors) != len(ss.Results[i].Errors) {
					t.Errorf("Result at index %d should've had %d errors, but actually had %d errors", i, len(expectedResult.Errors), len(ss.Results[i].Errors))
				} else {
					for j := range expectedResult.Errors {
						if ss.Results[i].Errors[j] != expectedResult.Errors[j] {
							t.Errorf("Error at index %d of result %d should've been '%s', got '%s'", j, i, expectedResult.Errors[j], ss.Results[i].Errors[j])
						}
					}
				}
				if len(expectedResult.ConditionResults) != len(ss.Results[i].ConditionResults) {
					t.Errorf("Result at index %d should've had %d ConditionResults, but actually had %d ConditionResults", i, len(expectedResult.ConditionResults), len(ss.Results[i].ConditionResults))
				} else {
					for j := range expectedResult.ConditionResults {
						if ss.Results[i].ConditionResults[j].Condition != expectedResult.ConditionResults[j].Condition {
							t.Errorf("ConditionResult at index %d of result %d should've had condition '%s', got '%s'", j, i, expectedResult.ConditionResults[j].Condition, ss.Results[i].ConditionResults[j].Condition)
						}
						if ss.Results[i].ConditionResults[j].Success != expectedResult.ConditionResults[j].Success {
							t.Errorf("ConditionResult at index %d of result %d should've had a Success of %t, got %t", j, i, expectedResult.ConditionResults[j].Success, ss.Results[i].ConditionResults[j].Success)
						}
					}
				}
				if expectedResult.Success != ss.Results[i].Success {
					t.Errorf("Result at index %d should've had a Success of %t, got %t", i, expectedResult.Success, ss.Results[i].Success)
				}
				if expectedResult.Timestamp.Unix() != ss.Results[i].Timestamp.Unix() {
					t.Errorf("Result at index %d should've had a Timestamp of %d, got %d", i, expectedResult.Timestamp.Unix(), ss.Results[i].Timestamp.Unix())
				}
				if expectedResult.CertificateExpiration != ss.Results[i].CertificateExpiration {
					t.Errorf("Result at index %d should've had a CertificateExpiration of %s, got %s", i, expectedResult.CertificateExpiration.String(), ss.Results[i].CertificateExpiration.String())
				}
			}
		})
	}
}

func TestStore_DeleteAllEndpointStatusesNotInKeys(t *testing.T) {
	scenarios := initStoresAndBaseScenarios(t, "TestStore_DeleteAllEndpointStatusesNotInKeys")
	defer cleanUp(scenarios)
	firstEndpoint := endpoint.Endpoint{Name: "endpoint-1", Group: "group"}
	secondEndpoint := endpoint.Endpoint{Name: "endpoint-2", Group: "group"}
	r := &testSuccessfulResult
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			scenario.Store.Insert(&firstEndpoint, r)
			scenario.Store.Insert(&secondEndpoint, r)
			if ss, _ := scenario.Store.GetEndpointStatusByKey(firstEndpoint.Key(), paging.NewEndpointStatusParams()); ss == nil {
				t.Fatal("firstEndpoint should exist, got", ss)
			}
			if ss, _ := scenario.Store.GetEndpointStatusByKey(secondEndpoint.Key(), paging.NewEndpointStatusParams()); ss == nil {
				t.Fatal("secondEndpoint should exist, got", ss)
			}
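			// Only firstEndpoint's key is passed, so secondEndpoint's statuses should be deleted.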
			scenario.Store.DeleteAllEndpointStatusesNotInKeys([]string{firstEndpoint.Key()})
			if ss, _ := scenario.Store.GetEndpointStatusByKey(firstEndpoint.Key(), paging.NewEndpointStatusParams()); ss == nil {
				t.Error("firstEndpoint should still exist, got", ss)
			}
			if ss, _ := scenario.Store.GetEndpointStatusByKey(secondEndpoint.Key(), paging.NewEndpointStatusParams()); ss != nil {
				t.Error("secondEndpoint should've been deleted, got", ss)
			}
			// Delete everything
			scenario.Store.DeleteAllEndpointStatusesNotInKeys([]string{})
			endpointStatuses, _ := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
			if len(endpointStatuses) != 0 {
				t.Errorf("everything should've been deleted")
			}
		})
	}
}

func TestGet(t *testing.T) {
	store := Get()
	if store == nil {
		t.Error("store should've been automatically initialized")
	}
}

func TestInitialize(t *testing.T) {
	dir := t.TempDir()
	type Scenario struct {
		Name        string
		Cfg         *storage.Config
		ExpectedErr error
	}
	scenarios := []Scenario{
		{
			Name:        "nil",
			Cfg:         nil,
			ExpectedErr: nil,
		},
		{
			Name:        "blank",
			Cfg:         &storage.Config{},
			ExpectedErr: nil,
		},
		{
			Name:        "memory-no-path",
			Cfg:         &storage.Config{Type: storage.TypeMemory},
			ExpectedErr: nil,
		},
		{
			Name:        "sqlite-no-path",
			Cfg:         &storage.Config{Type: storage.TypeSQLite},
			ExpectedErr: sql.ErrPathNotSpecified,
		},
		{
			Name:        "sqlite-with-path",
			Cfg:         &storage.Config{Type: storage.TypeSQLite, Path: filepath.Join(dir, "TestInitialize_sqlite-with-path.db")},
			ExpectedErr: nil,
		},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.Name, func(t *testing.T) {
			err := Initialize(scenario.Cfg)
			if err != scenario.ExpectedErr {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
			}
			if err != nil {
				return
			}
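			// On success, Initialize is expected to have set the package-level ctx, cancelFunc, and store.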
			if cancelFunc == nil {
				t.Error("cancelFunc shouldn't have been nil")
			}
			if ctx == nil {
				t.Error("ctx shouldn't have been nil")
			}
			if store == nil {
				t.Fatal("provider shouldn't have been nil")
			}
			store.Close()
			// Try to initialize it again
			err = Initialize(scenario.Cfg)
			if !errors.Is(err, scenario.ExpectedErr) {
				t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
				return
			}
			store.Close()
		})
	}
}

func TestAutoSave(t *testing.T) {
	file := filepath.Join(t.TempDir(), "TestAutoSave.db")
	if err := Initialize(&storage.Config{Path: file}); err != nil {
		t.Fatal("shouldn't have returned an error, got", err)
	}
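	// Let autoSave fire a few times at its 3ms interval, then cancel the context
	// and give the goroutine a moment to notice the cancellation and stop.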
	go autoSave(ctx, store, 3*time.Millisecond)
	time.Sleep(15 * time.Millisecond)
	cancelFunc()
	time.Sleep(50 * time.Millisecond)
}