Reset
storage/store/memory/memory.go (new file, 222 lines)
@@ -0,0 +1,222 @@
package memory

import (
    "sort"
    "sync"
    "time"

    "github.com/TwiN/gatus/v5/alerting/alert"
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
    "github.com/TwiN/gocache/v2"
)

// Store that leverages gocache
type Store struct {
    sync.RWMutex

    cache *gocache.Cache
}

// NewStore creates a new store using gocache.Cache
//
// This store holds everything in memory and does not support persistence
// across application restarts.
func NewStore() (*Store, error) {
    store := &Store{
        cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
    }
    return store, nil
}

// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error) {
    endpointStatuses := s.cache.GetAll()
    pagedEndpointStatuses := make([]*endpoint.Status, 0, len(endpointStatuses))
    for _, v := range endpointStatuses {
        pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(v.(*endpoint.Status), params))
    }
    sort.Slice(pagedEndpointStatuses, func(i, j int) bool {
        return pagedEndpointStatuses[i].Key < pagedEndpointStatuses[j].Key
    })
    return pagedEndpointStatuses, nil
}

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
    return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
}

// GetEndpointStatusByKey returns the endpoint status for a given key
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil {
        return nil, common.ErrEndpointNotFound
    }
    return ShallowCopyEndpointStatus(endpointStatus.(*endpoint.Status), params), nil
}

// GetUptimeByKey returns the uptime percentage during a time range
func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error) {
    if from.After(to) {
        return 0, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return 0, common.ErrEndpointNotFound
    }
    successfulExecutions := uint64(0)
    totalExecutions := uint64(0)
    current := from
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        successfulExecutions += hourlyStats.SuccessfulExecutions
        totalExecutions += hourlyStats.TotalExecutions
        current = current.Add(time.Hour)
    }
    if totalExecutions == 0 {
        return 0, nil
    }
    return float64(successfulExecutions) / float64(totalExecutions), nil
}

// GetAverageResponseTimeByKey returns the average response time in milliseconds (value) during a time range
func (s *Store) GetAverageResponseTimeByKey(key string, from, to time.Time) (int, error) {
    if from.After(to) {
        return 0, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return 0, common.ErrEndpointNotFound
    }
    current := from
    var totalExecutions, totalResponseTime uint64
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        totalExecutions += hourlyStats.TotalExecutions
        totalResponseTime += hourlyStats.TotalExecutionsResponseTime
        current = current.Add(time.Hour)
    }
    if totalExecutions == 0 {
        return 0, nil
    }
    return int(float64(totalResponseTime) / float64(totalExecutions)), nil
}

// GetHourlyAverageResponseTimeByKey returns a map of hourly (key) average response time in milliseconds (value) during a time range
func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error) {
    if from.After(to) {
        return nil, common.ErrInvalidTimeRange
    }
    endpointStatus := s.cache.GetValue(key)
    if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
        return nil, common.ErrEndpointNotFound
    }
    hourlyAverageResponseTimes := make(map[int64]int)
    current := from
    for to.Sub(current) >= 0 {
        hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
        hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            current = current.Add(time.Hour)
            continue
        }
        hourlyAverageResponseTimes[hourlyUnixTimestamp] = int(float64(hourlyStats.TotalExecutionsResponseTime) / float64(hourlyStats.TotalExecutions))
        current = current.Add(time.Hour)
    }
    return hourlyAverageResponseTimes, nil
}

// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
    key := ep.Key()
    s.Lock()
    status, exists := s.cache.Get(key)
    if !exists {
        status = endpoint.NewStatus(ep.Group, ep.Name)
        status.(*endpoint.Status).Events = append(status.(*endpoint.Status).Events, &endpoint.Event{
            Type:      endpoint.EventStart,
            Timestamp: time.Now(),
        })
    }
    AddResult(status.(*endpoint.Status), result)
    s.cache.Set(key, status)
    s.Unlock()
    return nil
}

// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
    var keysToDelete []string
    for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
        shouldDelete := true
        for _, key := range keys {
            if existingKey == key {
                shouldDelete = false
                break
            }
        }
        if shouldDelete {
            keysToDelete = append(keysToDelete, existingKey)
        }
    }
    return s.cache.DeleteAll(keysToDelete)
}

// GetTriggeredEndpointAlert returns whether the alert for the specified endpoint has been triggered,
// as well as the necessary information to resolve it
//
// Always returns that the alert does not exist for the in-memory store since it does not support persistence across restarts
func (s *Store) GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error) {
    return false, "", 0, nil
}

// UpsertTriggeredEndpointAlert inserts/updates a triggered alert for an endpoint
// Used for persistence of triggered alerts across application restarts
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) UpsertTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
    return nil
}

// DeleteTriggeredEndpointAlert deletes a triggered alert for an endpoint
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) DeleteTriggeredEndpointAlert(ep *endpoint.Endpoint, triggeredAlert *alert.Alert) error {
    return nil
}

// DeleteAllTriggeredAlertsNotInChecksumsByEndpoint removes all triggered alerts owned by an endpoint whose alert
// configurations are not provided in the checksums list.
// This prevents triggered alerts that have been removed or modified from lingering in the database.
//
// Does nothing for the in-memory store since it does not support persistence across restarts
func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.Endpoint, checksums []string) int {
    return 0
}

// Clear deletes everything from the store
func (s *Store) Clear() {
    s.cache.Clear()
}

// Save does nothing for the in-memory store since it does not persist anything to disk
func (s *Store) Save() error {
    return nil
}

// Close does nothing, because there's nothing to close
func (s *Store) Close() {
    return
}
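For a sense of how the pieces above fit together, here is a minimal usage sketch. It is not part of this commit: the endpoint and result values are made up, and the import path for the store is assumed from the file location in this diff.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
    "github.com/TwiN/gatus/v5/storage/store/memory"
)

func main() {
    // The error is always nil for the in-memory implementation, but the constructor exposes one
    store, err := memory.NewStore()
    if err != nil {
        log.Fatal(err)
    }
    defer store.Close()
    // Hypothetical endpoint and result, mirroring the fixtures used in memory_test.go
    ep := &endpoint.Endpoint{Name: "example", Group: "core", URL: "https://example.org"}
    result := &endpoint.Result{HTTPStatus: 200, Success: true, Connected: true, Timestamp: time.Now(), Duration: 50 * time.Millisecond}
    if err := store.Insert(ep, result); err != nil {
        log.Fatal(err)
    }
    // Fetch the status back with the first page of up to 20 results and events
    status, err := store.GetEndpointStatus(ep.Group, ep.Name, paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 20))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(status.Key, len(status.Results), len(status.Events))
}

Since Save and Close are no-ops in this store, the deferred Close is only there to mirror how a persistent store implementation would be used.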
storage/store/memory/memory_test.go (new file, 135 lines)
@@ -0,0 +1,135 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

var (
    firstCondition  = endpoint.Condition("[STATUS] == 200")
    secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
    thirdCondition  = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

    now = time.Now()

    testEndpoint = endpoint.Endpoint{
        Name:                    "name",
        Group:                   "group",
        URL:                     "https://example.org/what/ever",
        Method:                  "GET",
        Body:                    "body",
        Interval:                30 * time.Second,
        Conditions:              []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
        Alerts:                  nil,
        NumberOfFailuresInARow:  0,
        NumberOfSuccessesInARow: 0,
    }
    testSuccessfulResult = endpoint.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Errors:                nil,
        Connected:             true,
        Success:               true,
        Timestamp:             now,
        Duration:              150 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*endpoint.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   true,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   true,
            },
        },
    }
    testUnsuccessfulResult = endpoint.Result{
        Hostname:              "example.org",
        IP:                    "127.0.0.1",
        HTTPStatus:            200,
        Errors:                []string{"error-1", "error-2"},
        Connected:             true,
        Success:               false,
        Timestamp:             now,
        Duration:              750 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*endpoint.ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success:   true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success:   false,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success:   false,
            },
        },
    }
)

// Note that there are much more extensive tests in /storage/store/store_test.go.
// This test is simply an extra sanity check
func TestStore_SanityCheck(t *testing.T) {
    store, _ := NewStore()
    defer store.Close()
    store.Insert(&testEndpoint, &testSuccessfulResult)
    endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
    if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
        t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
    }
    store.Insert(&testEndpoint, &testUnsuccessfulResult)
    // Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
    endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
    if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
        t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
    }
    if hourlyAverageResponseTime, err := store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); err != nil {
        t.Errorf("expected no error, got %v", err)
    } else if len(hourlyAverageResponseTime) != 1 {
        t.Errorf("expected 1 hour to have had a result in the past 24 hours, got %d", len(hourlyAverageResponseTime))
    }
    if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); uptime != 0.5 {
        t.Errorf("expected uptime of last 24h to be 0.5, got %f", uptime)
    }
    if averageResponseTime, _ := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-24*time.Hour), time.Now()); averageResponseTime != 450 {
        t.Errorf("expected average response time of last 24h to be 450, got %d", averageResponseTime)
    }
    ss, _ := store.GetEndpointStatus(testEndpoint.Group, testEndpoint.Name, paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 20))
    if ss == nil {
        t.Fatalf("Store should've had key '%s', but didn't", testEndpoint.Key())
    }
    if len(ss.Events) != 3 {
        t.Errorf("Endpoint '%s' should've had 3 events, got %d", ss.Name, len(ss.Events))
    }
    if len(ss.Results) != 2 {
        t.Errorf("Endpoint '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
    }
    if deleted := store.DeleteAllEndpointStatusesNotInKeys([]string{}); deleted != 1 {
        t.Errorf("%d entries should've been deleted, got %d", 1, deleted)
    }
}

func TestStore_Save(t *testing.T) {
    store, err := NewStore()
    if err != nil {
        t.Fatal("expected no error, got", err.Error())
    }
    err = store.Save()
    if err != nil {
        t.Fatal("expected no error, got", err.Error())
    }
    store.Clear()
    store.Close()
}
storage/store/memory/uptime.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package memory

import (
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

const (
    uptimeCleanUpThreshold = 32 * 24
    uptimeRetention        = 30 * 24 * time.Hour
)

// processUptimeAfterResult processes the result by extracting the relevant data from the result and recalculating
// the uptime if necessary
func processUptimeAfterResult(uptime *endpoint.Uptime, result *endpoint.Result) {
    if uptime.HourlyStatistics == nil {
        uptime.HourlyStatistics = make(map[int64]*endpoint.HourlyUptimeStatistics)
    }
    unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
    hourlyStats := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
    if hourlyStats == nil {
        hourlyStats = &endpoint.HourlyUptimeStatistics{}
        uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
    }
    if result.Success {
        hourlyStats.SuccessfulExecutions++
    }
    hourlyStats.TotalExecutions++
    hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
    // Clean up only when we're starting to have too many useless keys
    // Note that this is only triggered when there are more entries than there should be after
    // 32 days, despite the fact that we are deleting everything that's older than 30 days.
    // This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 30 days.
    if len(uptime.HourlyStatistics) > uptimeCleanUpThreshold {
        retentionCutoff := time.Now().Add(-(uptimeRetention + time.Hour)).Unix()
        for hourlyUnixTimestamp := range uptime.HourlyStatistics {
            if retentionCutoff > hourlyUnixTimestamp {
                delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
            }
        }
    }
}
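To see the hourly bucketing in action, here is a small sketch. It is not part of this commit, and since processUptimeAfterResult is unexported it would have to live inside the memory package; the three fabricated results only exist to show the arithmetic that GetUptimeByKey and GetAverageResponseTimeByKey later perform over these buckets.

package memory

import (
    "fmt"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

// uptimeBucketingSketch is a hypothetical helper (not part of the commit) illustrating how
// results are bucketed per hour and how the uptime ratio falls out of HourlyStatistics.
func uptimeBucketingSketch() {
    uptime := endpoint.NewUptime()
    now := time.Now().Truncate(time.Hour)
    // Three results in the current hour: two successes, one failure, 20ms each
    for _, success := range []bool{true, true, false} {
        processUptimeAfterResult(uptime, &endpoint.Result{
            Timestamp: now,
            Success:   success,
            Duration:  20 * time.Millisecond,
        })
    }
    stats := uptime.HourlyStatistics[now.Unix()]
    // Over a range covering only this hour, GetUptimeByKey would compute the same 2/3 ratio
    // and GetAverageResponseTimeByKey the same 20ms average, by summing these per-hour counters
    fmt.Printf("uptime=%.2f avg=%dms\n",
        float64(stats.SuccessfulExecutions)/float64(stats.TotalExecutions),
        stats.TotalExecutionsResponseTime/stats.TotalExecutions)
}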
storage/store/memory/uptime_bench_test.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

func BenchmarkProcessUptimeAfterResult(b *testing.B) {
    uptime := endpoint.NewUptime()
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12000 days ago
    timestamp := now.Add(-12000 * 24 * time.Hour)
    for n := 0; n < b.N; n++ {
        processUptimeAfterResult(uptime, &endpoint.Result{
            Duration:  18 * time.Millisecond,
            Success:   n%15 == 0,
            Timestamp: timestamp,
        })
        // Simulate an endpoint with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
    b.ReportAllocs()
}
storage/store/memory/uptime_test.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
)

func TestProcessUptimeAfterResult(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    status := endpoint.NewStatus(ep.Group, ep.Name)
    uptime := status.Uptime

    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})

    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
    processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
}

func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    status := endpoint.NewStatus(ep.Group, ep.Name)
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12 days ago
    timestamp := now.Add(-12 * 24 * time.Hour)
    for timestamp.Unix() <= now.Unix() {
        AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true})
        if len(status.Uptime.HourlyStatistics) > uptimeCleanUpThreshold {
            t.Errorf("At no point in time should there be more than %d entries in status.Uptime.HourlyStatistics, but there are %d", uptimeCleanUpThreshold, len(status.Uptime.HourlyStatistics))
        }
        // Simulate an endpoint with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
}

func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *endpoint.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
    if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
        t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
    }
    if hourlyUptimeStatistics.TotalExecutions != expectedTotalExecutions {
        t.Error("TotalExecutions should've been", expectedTotalExecutions, "got", hourlyUptimeStatistics.TotalExecutions)
    }
    if hourlyUptimeStatistics.SuccessfulExecutions != expectedSuccessfulExecutions {
        t.Error("SuccessfulExecutions should've been", expectedSuccessfulExecutions, "got", hourlyUptimeStatistics.SuccessfulExecutions)
    }
}
storage/store/memory/util.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package memory

import (
    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

// ShallowCopyEndpointStatus returns a shallow copy of a Status with only the results
// within the range defined by the page and pageSize parameters
func ShallowCopyEndpointStatus(ss *endpoint.Status, params *paging.EndpointStatusParams) *endpoint.Status {
    shallowCopy := &endpoint.Status{
        Name:   ss.Name,
        Group:  ss.Group,
        Key:    ss.Key,
        Uptime: endpoint.NewUptime(),
    }
    numberOfResults := len(ss.Results)
    resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
    if resultsStart < 0 || resultsEnd < 0 {
        shallowCopy.Results = []*endpoint.Result{}
    } else {
        shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
    }
    numberOfEvents := len(ss.Events)
    eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
    if eventsStart < 0 || eventsEnd < 0 {
        shallowCopy.Events = []*endpoint.Event{}
    } else {
        shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
    }
    return shallowCopy
}

func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) {
    if page < 1 || pageSize < 0 {
        return -1, -1
    }
    start := numberOfResults - (page * pageSize)
    end := numberOfResults - ((page - 1) * pageSize)
    if start > numberOfResults {
        start = -1
    } else if start < 0 {
        start = 0
    }
    if end > numberOfResults {
        end = numberOfResults
    }
    return start, end
}

// AddResult adds a Result to Status.Results and makes sure that there are
// no more than MaximumNumberOfResults results in the Results slice
func AddResult(ss *endpoint.Status, result *endpoint.Result) {
    if ss == nil {
        return
    }
    if len(ss.Results) > 0 {
        // Check if there's any change since the last result
        if ss.Results[len(ss.Results)-1].Success != result.Success {
            ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
            if len(ss.Events) > common.MaximumNumberOfEvents {
                // Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
                // more than one extra element, we can get rid of all of them at once and thus return the slice to a
                // length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
                ss.Events = ss.Events[len(ss.Events)-common.MaximumNumberOfEvents:]
            }
        }
    } else {
        // This is the first result, so we need to add the first healthy/unhealthy event
        ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
    }
    ss.Results = append(ss.Results, result)
    if len(ss.Results) > common.MaximumNumberOfResults {
        // Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
        // than one extra element, we can get rid of all of them at once and thus return the slice to a length of
        // MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
        ss.Results = ss.Results[len(ss.Results)-common.MaximumNumberOfResults:]
    }
    processUptimeAfterResult(ss.Uptime, result)
}
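To make the newest-first pagination in getStartAndEndIndex concrete: results are appended chronologically, so with 25 stored results and a page size of 10, page 1 maps to indices [15:25] (the 10 most recent), page 2 to [5:15], page 3 to [0:5], and page 4 produces a negative end index, which ShallowCopyEndpointStatus turns into an empty page, matching the expectations in util_test.go below. The following sketch is not part of this commit and, since the function is unexported, assumes it lives inside the memory package.

package memory

import "fmt"

// paginationSketch is a hypothetical helper (not part of the commit) that prints the slice
// bounds produced by getStartAndEndIndex for 25 results and a page size of 10.
func paginationSketch() {
    numberOfResults := 25
    for page := 1; page <= 4; page++ {
        start, end := getStartAndEndIndex(numberOfResults, page, 10)
        if start < 0 || end < 0 {
            // Page 4 ends up here: ShallowCopyEndpointStatus maps negative indices to an empty page
            fmt.Printf("page %d: out of range, empty page\n", page)
            continue
        }
        // page 1 -> [15:25], page 2 -> [5:15], page 3 -> [0:5]
        fmt.Printf("page %d: results[%d:%d] (%d items)\n", page, start, end, end-start)
    }
}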
storage/store/memory/util_bench_test.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package memory

import (
    "testing"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func BenchmarkShallowCopyEndpointStatus(b *testing.B) {
    ep := &testEndpoint
    status := endpoint.NewStatus(ep.Group, ep.Name)
    for i := 0; i < common.MaximumNumberOfResults; i++ {
        AddResult(status, &testSuccessfulResult)
    }
    for n := 0; n < b.N; n++ {
        ShallowCopyEndpointStatus(status, paging.NewEndpointStatusParams().WithResults(1, 20))
    }
    b.ReportAllocs()
}
storage/store/memory/util_test.go (new file, 66 lines)
@@ -0,0 +1,66 @@
package memory

import (
    "testing"
    "time"

    "github.com/TwiN/gatus/v5/config/endpoint"
    "github.com/TwiN/gatus/v5/storage/store/common"
    "github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func TestAddResult(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
    for i := 0; i < (common.MaximumNumberOfResults+common.MaximumNumberOfEvents)*2; i++ {
        AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: time.Now()})
    }
    if len(endpointStatus.Results) != common.MaximumNumberOfResults {
        t.Errorf("expected endpointStatus.Results to not exceed a length of %d", common.MaximumNumberOfResults)
    }
    if len(endpointStatus.Events) != common.MaximumNumberOfEvents {
        t.Errorf("expected endpointStatus.Events to not exceed a length of %d", common.MaximumNumberOfEvents)
    }
    // Try to add nil endpointStatus
    AddResult(nil, &endpoint.Result{Timestamp: time.Now()})
}

func TestShallowCopyEndpointStatus(t *testing.T) {
    ep := &endpoint.Endpoint{Name: "name", Group: "group"}
    endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
    ts := time.Now().Add(-25 * time.Hour)
    for i := 0; i < 25; i++ {
        AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: ts})
        ts = ts.Add(time.Hour)
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, -1)).Results) != 0 {
        t.Error("expected to have 0 result")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 1)).Results) != 1 {
        t.Error("expected to have 1 result")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(5, 0)).Results) != 0 {
        t.Error("expected to have 0 results")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, 20)).Results) != 0 {
        t.Error("expected to have 0 result, because the page was invalid")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, -1)).Results) != 0 {
        t.Error("expected to have 0 result, because the page size was invalid")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 10)).Results) != 10 {
        t.Error("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(2, 10)).Results) != 10 {
        t.Error("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(3, 10)).Results) != 5 {
        t.Error("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(4, 10)).Results) != 0 {
        t.Error("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
    }
    if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(1, 50)).Results) != 25 {
        t.Error("expected to have 25 results, because there's only 25 results")
    }
}