refactor: Break core package into multiple packages under config/endpoint (#759)
* refactor: Partially break core package into dns, result and ssh packages
* refactor: Move core package to config/endpoint
* refactor: Fix warning about overlapping imported package name with endpoint variable
* refactor: Rename EndpointStatus to Status
* refactor: Merge result pkg back into endpoint pkg, because it makes more sense
* refactor: Rename parameter r to result in Condition.evaluate
* refactor: Rename parameter r to result
* refactor: Revert accidental change to endpoint.TypeDNS
* refactor: Rename parameter r to result
* refactor: Merge util package into endpoint package
* refactor: Rename parameter r to result
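For context, a minimal before/after sketch (not part of the commit itself) of how calling code changes with this refactor; all identifiers are taken from the diff below:

// Before: types and helpers lived in the core and util packages
import "github.com/TwiN/gatus/v5/core"
status := core.NewEndpointStatus(ep.Group, ep.Name) // *core.EndpointStatus
key := util.ConvertGroupAndEndpointNameToKey(groupName, endpointName)

// After: everything lives under config/endpoint, and EndpointStatus is now Status
import "github.com/TwiN/gatus/v5/config/endpoint"
status := endpoint.NewStatus(ep.Group, ep.Name) // *endpoint.Status
key := endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName)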
@@ -5,10 +5,9 @@ import (
"sync"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/util"
"github.com/TwiN/gocache/v2"
)

@@ -30,13 +29,13 @@ func NewStore() (*Store, error) {
return store, nil
}

// GetAllEndpointStatuses returns all monitored core.EndpointStatus
// with a subset of core.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*core.EndpointStatus, error) {
// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error) {
endpointStatuses := s.cache.GetAll()
pagedEndpointStatuses := make([]*core.EndpointStatus, 0, len(endpointStatuses))
pagedEndpointStatuses := make([]*endpoint.Status, 0, len(endpointStatuses))
for _, v := range endpointStatuses {
pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(v.(*core.EndpointStatus), params))
pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(v.(*endpoint.Status), params))
}
sort.Slice(pagedEndpointStatuses, func(i, j int) bool {
return pagedEndpointStatuses[i].Key < pagedEndpointStatuses[j].Key
@@ -45,17 +44,17 @@ func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*
}

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error) {
return s.GetEndpointStatusByKey(util.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
}

// GetEndpointStatusByKey returns the endpoint status for a given key
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error) {
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
endpointStatus := s.cache.GetValue(key)
if endpointStatus == nil {
return nil, common.ErrEndpointNotFound
}
return ShallowCopyEndpointStatus(endpointStatus.(*core.EndpointStatus), params), nil
return ShallowCopyEndpointStatus(endpointStatus.(*endpoint.Status), params), nil
}

// GetUptimeByKey returns the uptime percentage during a time range
@@ -64,7 +63,7 @@ func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error)
return 0, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*core.EndpointStatus).Uptime == nil {
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return 0, common.ErrEndpointNotFound
}
successfulExecutions := uint64(0)
@@ -72,7 +71,7 @@ func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error)
current := from
for to.Sub(current) >= 0 {
hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
hourlyStats := endpointStatus.(*core.EndpointStatus).Uptime.HourlyStatistics[hourlyUnixTimestamp]
hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
current = current.Add(time.Hour)
continue
@@ -93,14 +92,14 @@ func (s *Store) GetAverageResponseTimeByKey(key string, from, to time.Time) (int
return 0, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*core.EndpointStatus).Uptime == nil {
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return 0, common.ErrEndpointNotFound
}
current := from
var totalExecutions, totalResponseTime uint64
for to.Sub(current) >= 0 {
hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
hourlyStats := endpointStatus.(*core.EndpointStatus).Uptime.HourlyStatistics[hourlyUnixTimestamp]
hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
current = current.Add(time.Hour)
continue
@@ -121,14 +120,14 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
return nil, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*core.EndpointStatus).Uptime == nil {
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return nil, common.ErrEndpointNotFound
}
hourlyAverageResponseTimes := make(map[int64]int)
current := from
for to.Sub(current) >= 0 {
hourlyUnixTimestamp := current.Truncate(time.Hour).Unix()
hourlyStats := endpointStatus.(*core.EndpointStatus).Uptime.HourlyStatistics[hourlyUnixTimestamp]
hourlyStats := endpointStatus.(*endpoint.Status).Uptime.HourlyStatistics[hourlyUnixTimestamp]
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
current = current.Add(time.Hour)
continue
@@ -140,24 +139,24 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
}

// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
key := endpoint.Key()
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
key := ep.Key()
s.Lock()
status, exists := s.cache.Get(key)
if !exists {
status = core.NewEndpointStatus(endpoint.Group, endpoint.Name)
status.(*core.EndpointStatus).Events = append(status.(*core.EndpointStatus).Events, &core.Event{
Type: core.EventStart,
status = endpoint.NewStatus(ep.Group, ep.Name)
status.(*endpoint.Status).Events = append(status.(*endpoint.Status).Events, &endpoint.Event{
Type: endpoint.EventStart,
Timestamp: time.Now(),
})
}
AddResult(status.(*core.EndpointStatus), result)
AddResult(status.(*endpoint.Status), result)
s.cache.Set(key, status)
s.Unlock()
return nil
}

// DeleteAllEndpointStatusesNotInKeys removes all EndpointStatus that are not within the keys provided
// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
var keysToDelete []string
for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {

@@ -4,30 +4,30 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

var (
firstCondition = core.Condition("[STATUS] == 200")
secondCondition = core.Condition("[RESPONSE_TIME] < 500")
thirdCondition = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")
firstCondition = endpoint.Condition("[STATUS] == 200")
secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
thirdCondition = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

now = time.Now()

testEndpoint = core.Endpoint{
testEndpoint = endpoint.Endpoint{
Name: "name",
Group: "group",
URL: "https://example.org/what/ever",
Method: "GET",
Body: "body",
Interval: 30 * time.Second,
Conditions: []core.Condition{firstCondition, secondCondition, thirdCondition},
Conditions: []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
Alerts: nil,
NumberOfFailuresInARow: 0,
NumberOfSuccessesInARow: 0,
}
testSuccessfulResult = core.Result{
testSuccessfulResult = endpoint.Result{
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
@@ -37,7 +37,7 @@ var (
Timestamp: now,
Duration: 150 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
@@ -52,7 +52,7 @@ var (
},
},
}
testUnsuccessfulResult = core.Result{
testUnsuccessfulResult = endpoint.Result{
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
@@ -62,7 +62,7 @@ var (
Timestamp: now,
Duration: 750 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,

@@ -3,7 +3,7 @@ package memory
import (
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
)

const (
@@ -13,14 +13,14 @@ const (

// processUptimeAfterResult processes the result by extracting the relevant from the result and recalculating the uptime
// if necessary
func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
func processUptimeAfterResult(uptime *endpoint.Uptime, result *endpoint.Result) {
if uptime.HourlyStatistics == nil {
uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
uptime.HourlyStatistics = make(map[int64]*endpoint.HourlyUptimeStatistics)
}
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
hourlyStats, _ := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
if hourlyStats == nil {
hourlyStats = &core.HourlyUptimeStatistics{}
hourlyStats = &endpoint.HourlyUptimeStatistics{}
uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
}
if result.Success {

@@ -4,17 +4,17 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
)

func BenchmarkProcessUptimeAfterResult(b *testing.B) {
uptime := core.NewUptime()
uptime := endpoint.NewUptime()
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
// Start 12000 days ago
timestamp := now.Add(-12000 * 24 * time.Hour)
for n := 0; n < b.N; n++ {
processUptimeAfterResult(uptime, &core.Result{
processUptimeAfterResult(uptime, &endpoint.Result{
Duration: 18 * time.Millisecond,
Success: n%15 == 0,
Timestamp: timestamp,

@@ -4,53 +4,53 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
)

func TestProcessUptimeAfterResult(t *testing.T) {
endpoint := &core.Endpoint{Name: "name", Group: "group"}
status := core.NewEndpointStatus(endpoint.Group, endpoint.Name)
ep := &endpoint.Endpoint{Name: "name", Group: "group"}
status := endpoint.NewStatus(ep.Group, ep.Name)
uptime := status.Uptime

now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})

processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
processUptimeAfterResult(uptime, &endpoint.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
}

func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
endpoint := &core.Endpoint{Name: "name", Group: "group"}
status := core.NewEndpointStatus(endpoint.Group, endpoint.Name)
ep := &endpoint.Endpoint{Name: "name", Group: "group"}
status := endpoint.NewStatus(ep.Group, ep.Name)
now := time.Now()
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
// Start 12 days ago
timestamp := now.Add(-12 * 24 * time.Hour)
for timestamp.Unix() <= now.Unix() {
AddResult(status, &core.Result{Timestamp: timestamp, Success: true})
AddResult(status, &endpoint.Result{Timestamp: timestamp, Success: true})
if len(status.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
t.Errorf("At no point in time should there be more than %d entries in status.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(status.Uptime.HourlyStatistics))
}
@@ -59,7 +59,7 @@ func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
}
}

func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *core.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *endpoint.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
}

@@ -1,31 +1,31 @@
package memory

import (
"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

// ShallowCopyEndpointStatus returns a shallow copy of a EndpointStatus with only the results
// ShallowCopyEndpointStatus returns a shallow copy of a Status with only the results
// within the range defined by the page and pageSize parameters
func ShallowCopyEndpointStatus(ss *core.EndpointStatus, params *paging.EndpointStatusParams) *core.EndpointStatus {
shallowCopy := &core.EndpointStatus{
func ShallowCopyEndpointStatus(ss *endpoint.Status, params *paging.EndpointStatusParams) *endpoint.Status {
shallowCopy := &endpoint.Status{
Name: ss.Name,
Group: ss.Group,
Key: ss.Key,
Uptime: core.NewUptime(),
Uptime: endpoint.NewUptime(),
}
numberOfResults := len(ss.Results)
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
if resultsStart < 0 || resultsEnd < 0 {
shallowCopy.Results = []*core.Result{}
shallowCopy.Results = []*endpoint.Result{}
} else {
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
}
numberOfEvents := len(ss.Events)
eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
if eventsStart < 0 || eventsEnd < 0 {
shallowCopy.Events = []*core.Event{}
shallowCopy.Events = []*endpoint.Event{}
} else {
shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
}
@@ -49,16 +49,16 @@ func getStartAndEndIndex(numberOfResults int, page, pageSize int) (int, int) {
return start, end
}

// AddResult adds a Result to EndpointStatus.Results and makes sure that there are
// AddResult adds a Result to Status.Results and makes sure that there are
// no more than MaximumNumberOfResults results in the Results slice
func AddResult(ss *core.EndpointStatus, result *core.Result) {
func AddResult(ss *endpoint.Status, result *endpoint.Result) {
if ss == nil {
return
}
if len(ss.Results) > 0 {
// Check if there's any change since the last result
if ss.Results[len(ss.Results)-1].Success != result.Success {
ss.Events = append(ss.Events, core.NewEventFromResult(result))
ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
if len(ss.Events) > common.MaximumNumberOfEvents {
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
@@ -68,7 +68,7 @@ func AddResult(ss *core.EndpointStatus, result *core.Result) {
}
} else {
// This is the first result, so we need to add the first healthy/unhealthy event
ss.Events = append(ss.Events, core.NewEventFromResult(result))
ss.Events = append(ss.Events, endpoint.NewEventFromResult(result))
}
ss.Results = append(ss.Results, result)
if len(ss.Results) > common.MaximumNumberOfResults {

@@ -3,14 +3,14 @@ package memory
import (
"testing"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func BenchmarkShallowCopyEndpointStatus(b *testing.B) {
endpoint := &testEndpoint
status := core.NewEndpointStatus(endpoint.Group, endpoint.Name)
ep := &testEndpoint
status := endpoint.NewStatus(ep.Group, ep.Name)
for i := 0; i < common.MaximumNumberOfResults; i++ {
AddResult(status, &testSuccessfulResult)
}

@@ -4,16 +4,16 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

func TestAddResult(t *testing.T) {
endpoint := &core.Endpoint{Name: "name", Group: "group"}
endpointStatus := core.NewEndpointStatus(endpoint.Group, endpoint.Name)
ep := &endpoint.Endpoint{Name: "name", Group: "group"}
endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
for i := 0; i < (common.MaximumNumberOfResults+common.MaximumNumberOfEvents)*2; i++ {
AddResult(endpointStatus, &core.Result{Success: i%2 == 0, Timestamp: time.Now()})
AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: time.Now()})
}
if len(endpointStatus.Results) != common.MaximumNumberOfResults {
t.Errorf("expected endpointStatus.Results to not exceed a length of %d", common.MaximumNumberOfResults)
@@ -22,15 +22,15 @@ func TestAddResult(t *testing.T) {
t.Errorf("expected endpointStatus.Events to not exceed a length of %d", common.MaximumNumberOfEvents)
}
// Try to add nil endpointStatus
AddResult(nil, &core.Result{Timestamp: time.Now()})
AddResult(nil, &endpoint.Result{Timestamp: time.Now()})
}

func TestShallowCopyEndpointStatus(t *testing.T) {
endpoint := &core.Endpoint{Name: "name", Group: "group"}
endpointStatus := core.NewEndpointStatus(endpoint.Group, endpoint.Name)
ep := &endpoint.Endpoint{Name: "name", Group: "group"}
endpointStatus := endpoint.NewStatus(ep.Group, ep.Name)
ts := time.Now().Add(-25 * time.Hour)
for i := 0; i < 25; i++ {
AddResult(endpointStatus, &core.Result{Success: i%2 == 0, Timestamp: ts})
AddResult(endpointStatus, &endpoint.Result{Success: i%2 == 0, Timestamp: ts})
ts = ts.Add(time.Hour)
}
if len(ShallowCopyEndpointStatus(endpointStatus, paging.NewEndpointStatusParams().WithResults(-1, -1)).Results) != 0 {

@@ -9,10 +9,9 @@ import (
"strings"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/util"
"github.com/TwiN/gocache/v2"
_ "github.com/lib/pq"
_ "modernc.org/sqlite"
@@ -100,9 +99,9 @@ func (s *Store) createSchema() error {
return s.createPostgresSchema()
}

// GetAllEndpointStatuses returns all monitored core.EndpointStatus
// with a subset of core.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*core.EndpointStatus, error) {
// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error) {
tx, err := s.db.Begin()
if err != nil {
return nil, err
@@ -112,7 +111,7 @@ func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*
_ = tx.Rollback()
return nil, err
}
endpointStatuses := make([]*core.EndpointStatus, 0, len(keys))
endpointStatuses := make([]*endpoint.Status, 0, len(keys))
for _, key := range keys {
endpointStatus, err := s.getEndpointStatusByKey(tx, key, params)
if err != nil {
@@ -127,12 +126,12 @@ func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*
}

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error) {
return s.GetEndpointStatusByKey(util.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
}

// GetEndpointStatusByKey returns the endpoint status for a given key
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error) {
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
tx, err := s.db.Begin()
if err != nil {
return nil, err
@@ -224,30 +223,30 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
}

// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
tx, err := s.db.Begin()
if err != nil {
return err
}
endpointID, err := s.getEndpointID(tx, endpoint)
endpointID, err := s.getEndpointID(tx, ep)
if err != nil {
if errors.Is(err, common.ErrEndpointNotFound) {
// Endpoint doesn't exist in the database, insert it
if endpointID, err = s.insertEndpoint(tx, endpoint); err != nil {
if endpointID, err = s.insertEndpoint(tx, ep); err != nil {
_ = tx.Rollback()
log.Printf("[sql.Insert] Failed to create endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to create endpoint with group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
return err
}
} else {
_ = tx.Rollback()
log.Printf("[sql.Insert] Failed to retrieve id of endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to retrieve id of endpoint with group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
return err
}
}
// First, we need to check if we need to insert a new event.
//
// A new event must be added if either of the following cases happen:
// 1. There is only 1 event. The total number of events for a endpoint can only be 1 if the only existing event is
// 1. There is only 1 event. The total number of events for an endpoint can only be 1 if the only existing event is
// of type EventStart, in which case we will have to create a new event of type EventHealthy or EventUnhealthy
// based on result.Success.
// 2. The lastResult.Success != result.Success. This implies that the endpoint went from healthy to unhealthy or
@@ -256,38 +255,38 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
numberOfEvents, err := s.getNumberOfEventsByEndpointID(tx, endpointID)
if err != nil {
// Silently fail
log.Printf("[sql.Insert] Failed to retrieve total number of events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to retrieve total number of events for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
}
if numberOfEvents == 0 {
// There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event
err = s.insertEndpointEvent(tx, endpointID, &core.Event{
Type: core.EventStart,
err = s.insertEndpointEvent(tx, endpointID, &endpoint.Event{
Type: endpoint.EventStart,
Timestamp: result.Timestamp.Add(-50 * time.Millisecond),
})
if err != nil {
// Silently fail
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", core.EventStart, endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", endpoint.EventStart, ep.Group, ep.Name, err.Error())
}
event := core.NewEventFromResult(result)
event := endpoint.NewEventFromResult(result)
if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
// Silently fail
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, ep.Group, ep.Name, err.Error())
}
} else {
// Get the success value of the previous result
var lastResultSuccess bool
if lastResultSuccess, err = s.getLastEndpointResultSuccessValue(tx, endpointID); err != nil {
log.Printf("[sql.Insert] Failed to retrieve outcome of previous result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to retrieve outcome of previous result for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
} else {
// If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
// If the final outcome (success or failure) of the previous and the new result aren't the same, it means
// that the endpoint either went from Healthy to Unhealthy or Unhealthy -> Healthy, therefore, we'll add
// an event to mark the change in state
if lastResultSuccess != result.Success {
event := core.NewEventFromResult(result)
event := endpoint.NewEventFromResult(result)
if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
// Silently fail
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, ep.Group, ep.Name, err.Error())
}
}
}
@@ -296,45 +295,45 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
// (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
if numberOfEvents > eventsCleanUpThreshold {
if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil {
log.Printf("[sql.Insert] Failed to delete old events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to delete old events for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
}
}
}
// Second, we need to insert the result.
if err = s.insertEndpointResult(tx, endpointID, result); err != nil {
log.Printf("[sql.Insert] Failed to insert result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to insert result for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
_ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
return err
}
// Clean up old results
numberOfResults, err := s.getNumberOfResultsByEndpointID(tx, endpointID)
if err != nil {
log.Printf("[sql.Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
} else {
if numberOfResults > resultsCleanUpThreshold {
if err = s.deleteOldEndpointResults(tx, endpointID); err != nil {
log.Printf("[sql.Insert] Failed to delete old results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to delete old results for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
}
}
}
// Finally, we need to insert the uptime data.
// Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
if err = s.updateEndpointUptime(tx, endpointID, result); err != nil {
log.Printf("[sql.Insert] Failed to update uptime for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to update uptime for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
}
// Clean up old uptime entries
ageOfOldestUptimeEntry, err := s.getAgeOfOldestEndpointUptimeEntry(tx, endpointID)
if err != nil {
log.Printf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
} else {
if ageOfOldestUptimeEntry > uptimeCleanUpThreshold {
if err = s.deleteOldUptimeEntries(tx, endpointID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
log.Printf("[sql.Insert] Failed to delete old uptime entries for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
log.Printf("[sql.Insert] Failed to delete old uptime entries for group=%s; endpoint=%s: %s", ep.Group, ep.Name, err.Error())
}
}
}
if s.writeThroughCache != nil {
cacheKeysToRefresh := s.writeThroughCache.GetKeysByPattern(endpoint.Key()+"*", 0)
cacheKeysToRefresh := s.writeThroughCache.GetKeysByPattern(ep.Key()+"*", 0)
for _, cacheKey := range cacheKeysToRefresh {
s.writeThroughCache.Delete(cacheKey)
endpointKey, params, err := extractKeyAndParamsFromCacheKey(cacheKey)
@@ -405,14 +404,14 @@ func (s *Store) Close() {
}

// insertEndpoint inserts an endpoint in the store and returns the generated id of said endpoint
func (s *Store) insertEndpoint(tx *sql.Tx, endpoint *core.Endpoint) (int64, error) {
//log.Printf("[sql.insertEndpoint] Inserting endpoint with group=%s and name=%s", endpoint.Group, endpoint.Name)
func (s *Store) insertEndpoint(tx *sql.Tx, ep *endpoint.Endpoint) (int64, error) {
//log.Printf("[sql.insertEndpoint] Inserting endpoint with group=%s and name=%s", ep.Group, ep.Name)
var id int64
err := tx.QueryRow(
"INSERT INTO endpoints (endpoint_key, endpoint_name, endpoint_group) VALUES ($1, $2, $3) RETURNING endpoint_id",
endpoint.Key(),
endpoint.Name,
endpoint.Group,
ep.Key(),
ep.Name,
ep.Group,
).Scan(&id)
if err != nil {
return 0, err
@@ -421,7 +420,7 @@ func (s *Store) insertEndpoint(tx *sql.Tx, endpoint *core.Endpoint) (int64, erro
}

// insertEndpointEvent inserts en event in the store
func (s *Store) insertEndpointEvent(tx *sql.Tx, endpointID int64, event *core.Event) error {
func (s *Store) insertEndpointEvent(tx *sql.Tx, endpointID int64, event *endpoint.Event) error {
_, err := tx.Exec(
"INSERT INTO endpoint_events (endpoint_id, event_type, event_timestamp) VALUES ($1, $2, $3)",
endpointID,
@@ -435,7 +434,7 @@ func (s *Store) insertEndpointEvent(tx *sql.Tx, endpointID int64, event *core.Ev
}

// insertEndpointResult inserts a result in the store
func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *core.Result) error {
func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *endpoint.Result) error {
var endpointResultID int64
err := tx.QueryRow(
`
@@ -462,7 +461,7 @@ func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *core.
return s.insertConditionResults(tx, endpointResultID, result.ConditionResults)
}

func (s *Store) insertConditionResults(tx *sql.Tx, endpointResultID int64, conditionResults []*core.ConditionResult) error {
func (s *Store) insertConditionResults(tx *sql.Tx, endpointResultID int64, conditionResults []*endpoint.ConditionResult) error {
var err error
for _, cr := range conditionResults {
_, err = tx.Exec("INSERT INTO endpoint_result_conditions (endpoint_result_id, condition, success) VALUES ($1, $2, $3)",
@@ -477,7 +476,7 @@ func (s *Store) insertConditionResults(tx *sql.Tx, endpointResultID int64, condi
return nil
}

func (s *Store) updateEndpointUptime(tx *sql.Tx, endpointID int64, result *core.Result) error {
func (s *Store) updateEndpointUptime(tx *sql.Tx, endpointID int64, result *endpoint.Result) error {
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
var successfulExecutions int
if result.Success {
@@ -514,12 +513,12 @@ func (s *Store) getAllEndpointKeys(tx *sql.Tx) (keys []string, err error) {
return
}

func (s *Store) getEndpointStatusByKey(tx *sql.Tx, key string, parameters *paging.EndpointStatusParams) (*core.EndpointStatus, error) {
func (s *Store) getEndpointStatusByKey(tx *sql.Tx, key string, parameters *paging.EndpointStatusParams) (*endpoint.Status, error) {
var cacheKey string
if s.writeThroughCache != nil {
cacheKey = generateCacheKey(key, parameters)
if cachedEndpointStatus, exists := s.writeThroughCache.Get(cacheKey); exists {
if castedCachedEndpointStatus, ok := cachedEndpointStatus.(*core.EndpointStatus); ok {
if castedCachedEndpointStatus, ok := cachedEndpointStatus.(*endpoint.Status); ok {
return castedCachedEndpointStatus, nil
}
}
@@ -528,7 +527,7 @@ func (s *Store) getEndpointStatusByKey(tx *sql.Tx, key string, parameters *pagin
if err != nil {
return nil, err
}
endpointStatus := core.NewEndpointStatus(group, endpointName)
endpointStatus := endpoint.NewStatus(group, endpointName)
if parameters.EventsPageSize > 0 {
if endpointStatus.Events, err = s.getEndpointEventsByEndpointID(tx, endpointID, parameters.EventsPage, parameters.EventsPageSize); err != nil {
log.Printf("[sql.getEndpointStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
@@ -564,7 +563,7 @@ func (s *Store) getEndpointIDGroupAndNameByKey(tx *sql.Tx, key string) (id int64
return
}

func (s *Store) getEndpointEventsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (events []*core.Event, err error) {
func (s *Store) getEndpointEventsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (events []*endpoint.Event, err error) {
rows, err := tx.Query(
`
SELECT event_type, event_timestamp
@@ -581,14 +580,14 @@ func (s *Store) getEndpointEventsByEndpointID(tx *sql.Tx, endpointID int64, page
return nil, err
}
for rows.Next() {
event := &core.Event{}
event := &endpoint.Event{}
_ = rows.Scan(&event.Type, &event.Timestamp)
events = append(events, event)
}
return
}

func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (results []*core.Result, err error) {
func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, page, pageSize int) (results []*endpoint.Result, err error) {
rows, err := tx.Query(
`
SELECT endpoint_result_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp
@@ -604,9 +603,9 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
if err != nil {
return nil, err
}
idResultMap := make(map[int64]*core.Result)
idResultMap := make(map[int64]*endpoint.Result)
for rows.Next() {
result := &core.Result{}
result := &endpoint.Result{}
var id int64
var joinedErrors string
err = rows.Scan(&id, &result.Success, &joinedErrors, &result.Connected, &result.HTTPStatus, &result.DNSRCode, &result.CertificateExpiration, &result.DomainExpiration, &result.Hostname, &result.IP, &result.Duration, &result.Timestamp)
@@ -618,7 +617,7 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
result.Errors = strings.Split(joinedErrors, arraySeparator)
}
// This is faster than using a subselect
results = append([]*core.Result{result}, results...)
results = append([]*endpoint.Result{result}, results...)
idResultMap[id] = result
}
if len(idResultMap) == 0 {
@@ -643,7 +642,7 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
}
defer rows.Close() // explicitly defer the close in case an error happens during the scan
for rows.Next() {
conditionResult := &core.ConditionResult{}
conditionResult := &endpoint.ConditionResult{}
var endpointResultID int64
if err = rows.Scan(&endpointResultID, &conditionResult.Condition, &conditionResult.Success); err != nil {
return
@@ -734,9 +733,9 @@ func (s *Store) getEndpointHourlyAverageResponseTimes(tx *sql.Tx, endpointID int
return hourlyAverageResponseTimes, nil
}

func (s *Store) getEndpointID(tx *sql.Tx, endpoint *core.Endpoint) (int64, error) {
func (s *Store) getEndpointID(tx *sql.Tx, ep *endpoint.Endpoint) (int64, error) {
var id int64
err := tx.QueryRow("SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1", endpoint.Key()).Scan(&id)
err := tx.QueryRow("SELECT endpoint_id FROM endpoints WHERE endpoint_key = $1", ep.Key()).Scan(&id)
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return 0, common.ErrEndpointNotFound

@@ -4,31 +4,31 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)

var (
firstCondition = core.Condition("[STATUS] == 200")
secondCondition = core.Condition("[RESPONSE_TIME] < 500")
thirdCondition = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")
firstCondition = endpoint.Condition("[STATUS] == 200")
secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
thirdCondition = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

now = time.Now()

testEndpoint = core.Endpoint{
testEndpoint = endpoint.Endpoint{
Name: "name",
Group: "group",
URL: "https://example.org/what/ever",
Method: "GET",
Body: "body",
Interval: 30 * time.Second,
Conditions: []core.Condition{firstCondition, secondCondition, thirdCondition},
Conditions: []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
Alerts: nil,
NumberOfFailuresInARow: 0,
NumberOfSuccessesInARow: 0,
}
testSuccessfulResult = core.Result{
testSuccessfulResult = endpoint.Result{
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
@@ -38,7 +38,7 @@ var (
Timestamp: now,
Duration: 150 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
@@ -53,7 +53,7 @@ var (
},
},
}
testUnsuccessfulResult = core.Result{
testUnsuccessfulResult = endpoint.Result{
Hostname: "example.org",
IP: "127.0.0.1",
HTTPStatus: 200,
@@ -63,7 +63,7 @@ var (
Timestamp: now,
Duration: 750 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
@@ -100,7 +100,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
now := time.Now().Truncate(time.Hour)
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())

store.Insert(&testEndpoint, &core.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})

tx, _ := store.db.Begin()
oldest, _ := store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -110,7 +110,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}

// The oldest cache entry should remain at ~5 hours old, because this entry is more recent
store.Insert(&testEndpoint, &core.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})

tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -120,7 +120,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}

// The oldest cache entry should now become at ~8 hours old, because this entry is older
store.Insert(&testEndpoint, &core.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})

tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -130,7 +130,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}

// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
store.Insert(&testEndpoint, &core.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold - time.Hour)), Success: true})
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold - time.Hour)), Success: true})

tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -141,7 +141,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {

// Since this entry is after the uptimeCleanUpThreshold, both this entry as well as the previous
// one should be deleted since they both surpass uptimeRetention
store.Insert(&testEndpoint, &core.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold + time.Hour)), Success: true})
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeCleanUpThreshold + time.Hour)), Success: true})

tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -313,7 +313,7 @@ func TestStore_InvalidTransaction(t *testing.T) {
if _, err := store.insertEndpoint(tx, &testEndpoint); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.insertEndpointEvent(tx, 1, core.NewEventFromResult(&testSuccessfulResult)); err == nil {
if err := store.insertEndpointEvent(tx, 1, endpoint.NewEventFromResult(&testSuccessfulResult)); err == nil {
t.Error("should've returned an error, because the transaction was already committed")
}
if err := store.insertEndpointResult(tx, 1, &testSuccessfulResult); err == nil {

@@ -5,7 +5,7 @@ import (
"log"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
@@ -14,15 +14,15 @@ import (

// Store is the interface that each store should implement
type Store interface {
// GetAllEndpointStatuses returns the JSON encoding of all monitored core.EndpointStatus
// with a subset of core.Result defined by the page and pageSize parameters
GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*core.EndpointStatus, error)
// GetAllEndpointStatuses returns the JSON encoding of all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error)

// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error)
GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error)

// GetEndpointStatusByKey returns the endpoint status for a given key
GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*core.EndpointStatus, error)
GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error)

// GetUptimeByKey returns the uptime percentage during a time range
GetUptimeByKey(key string, from, to time.Time) (float64, error)
@@ -34,9 +34,9 @@ type Store interface {
GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error)

// Insert adds the observed result for the specified endpoint into the store
Insert(endpoint *core.Endpoint, result *core.Result) error
Insert(ep *endpoint.Endpoint, result *endpoint.Result) error

// DeleteAllEndpointStatusesNotInKeys removes all EndpointStatus that are not within the keys provided
// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
//
// Used to delete endpoints that have been persisted but are no longer part of the configured endpoints
DeleteAllEndpointStatusesNotInKeys(keys []string) int

@@ -5,7 +5,7 @@ import (
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
"github.com/TwiN/gatus/v5/storage/store/sql"
@@ -53,11 +53,11 @@ func BenchmarkStore_GetAllEndpointStatuses(b *testing.B) {
for _, numberOfEndpointsToCreate := range numberOfEndpoints {
// Create endpoints and insert results
for i := 0; i < numberOfEndpointsToCreate; i++ {
endpoint := testEndpoint
endpoint.Name = "endpoint" + strconv.Itoa(i)
ep := testEndpoint
ep.Name = "endpoint" + strconv.Itoa(i)
// Insert 20 results for each endpoint
for j := 0; j < 20; j++ {
scenario.Store.Insert(&endpoint, &testSuccessfulResult)
scenario.Store.Insert(&ep, &testSuccessfulResult)
}
}
// Run the scenarios
@@ -123,7 +123,7 @@ func BenchmarkStore_Insert(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
n := 0
for pb.Next() {
var result core.Result
var result endpoint.Result
if n%10 == 0 {
result = testUnsuccessfulResult
} else {
@@ -136,7 +136,7 @@ func BenchmarkStore_Insert(b *testing.B) {
})
} else {
for n := 0; n < b.N; n++ {
var result core.Result
var result endpoint.Result
if n%10 == 0 {
result = testUnsuccessfulResult
} else {

@@ -1,11 +1,12 @@
package store

import (
"errors"
"path/filepath"
"testing"
"time"

"github.com/TwiN/gatus/v5/core"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
@@ -14,25 +15,25 @@ import (
)

var (
firstCondition = core.Condition("[STATUS] == 200")
secondCondition = core.Condition("[RESPONSE_TIME] < 500")
thirdCondition = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")
firstCondition = endpoint.Condition("[STATUS] == 200")
secondCondition = endpoint.Condition("[RESPONSE_TIME] < 500")
thirdCondition = endpoint.Condition("[CERTIFICATE_EXPIRATION] < 72h")

now = time.Now().Truncate(time.Hour)

testEndpoint = core.Endpoint{
testEndpoint = endpoint.Endpoint{
Name: "name",
Group: "group",
URL: "https://example.org/what/ever",
Method: "GET",
Body: "body",
Interval: 30 * time.Second,
Conditions: []core.Condition{firstCondition, secondCondition, thirdCondition},
Conditions: []endpoint.Condition{firstCondition, secondCondition, thirdCondition},
Alerts: nil,
NumberOfFailuresInARow: 0,
NumberOfSuccessesInARow: 0,
}
testSuccessfulResult = core.Result{
testSuccessfulResult = endpoint.Result{
Timestamp: now,
Success: true,
Hostname: "example.org",
@@ -42,7 +43,7 @@ var (
Connected: true,
Duration: 150 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
@@ -57,7 +58,7 @@ var (
},
},
}
testUnsuccessfulResult = core.Result{
testUnsuccessfulResult = endpoint.Result{
Timestamp: now,
Success: false,
Hostname: "example.org",
@@ -67,7 +68,7 @@ var (
Connected: true,
Duration: 750 * time.Millisecond,
CertificateExpiration: 10 * time.Hour,
ConditionResults: []*core.ConditionResult{
ConditionResults: []*endpoint.ConditionResult{
{
Condition: "[STATUS] == 200",
Success: true,
@@ -176,21 +177,21 @@ func TestStore_GetEndpointStatusForMissingStatusReturnsNil(t *testing.T) {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
endpointStatus, err := scenario.Store.GetEndpointStatus("nonexistantgroup", "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if err != common.ErrEndpointNotFound {
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
t.Errorf("Returned endpoint status for group '%s' and name '%s' not nil after inserting the endpoint into the store", testEndpoint.Group, testEndpoint.Name)
}
endpointStatus, err = scenario.Store.GetEndpointStatus(testEndpoint.Group, "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if err != common.ErrEndpointNotFound {
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
t.Errorf("Returned endpoint status for group '%s' and name '%s' not nil after inserting the endpoint into the store", testEndpoint.Group, "nonexistantname")
}
endpointStatus, err = scenario.Store.GetEndpointStatus("nonexistantgroup", testEndpoint.Name, paging.NewEndpointStatusParams().WithEvents(1, common.MaximumNumberOfEvents).WithResults(1, common.MaximumNumberOfResults))
if err != common.ErrEndpointNotFound {
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
}
if endpointStatus != nil {
@@ -482,7 +483,7 @@ func TestStore_Insert(t *testing.T) {
if len(ss.Results) != 2 {
t.Fatalf("Endpoint '%s' should've had 2 results, got %d", ss.Name, len(ss.Results))
}
for i, expectedResult := range []core.Result{firstResult, secondResult} {
for i, expectedResult := range []endpoint.Result{firstResult, secondResult} {
if expectedResult.HTTPStatus != ss.Results[i].HTTPStatus {
t.Errorf("Result at index %d should've had a HTTPStatus of %d, got %d", i, ss.Results[i].HTTPStatus, expectedResult.HTTPStatus)
}
@@ -539,13 +540,13 @@ func TestStore_Insert(t *testing.T) {
func TestStore_DeleteAllEndpointStatusesNotInKeys(t *testing.T) {
scenarios := initStoresAndBaseScenarios(t, "TestStore_DeleteAllEndpointStatusesNotInKeys")
defer cleanUp(scenarios)
firstEndpoint := core.Endpoint{Name: "endpoint-1", Group: "group"}
secondEndpoint := core.Endpoint{Name: "endpoint-2", Group: "group"}
result := &testSuccessfulResult
firstEndpoint := endpoint.Endpoint{Name: "endpoint-1", Group: "group"}
secondEndpoint := endpoint.Endpoint{Name: "endpoint-2", Group: "group"}
r := &testSuccessfulResult
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&firstEndpoint, result)
scenario.Store.Insert(&secondEndpoint, result)
scenario.Store.Insert(&firstEndpoint, r)
scenario.Store.Insert(&secondEndpoint, r)
if ss, _ := scenario.Store.GetEndpointStatusByKey(firstEndpoint.Key(), paging.NewEndpointStatusParams()); ss == nil {
t.Fatal("firstEndpoint should exist, got", ss)
}
@@ -631,7 +632,7 @@ func TestInitialize(t *testing.T) {
store.Close()
// Try to initialize it again
err = Initialize(scenario.Cfg)
if err != scenario.ExpectedErr {
if !errors.Is(err, scenario.ExpectedErr) {
t.Errorf("expected %v, got %v", scenario.ExpectedErr, err)
return
}