Implement paging and refactor stores to match new store interface with paging
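The refactored store interface itself does not appear in this excerpt. As a rough sketch only, assuming reads now take page and pageSize parameters, such an interface could look like the following; the type and method names are illustrative rather than taken from the commit:

package core

// Store is an illustrative sketch of a storage interface with paging.
// The interface and method names are assumptions and may not match the
// actual store interface introduced by this commit.
type Store interface {
    // GetServiceStatusByKey returns the status of a single service, with its
    // Results trimmed down to the requested page and page size.
    GetServiceStatusByKey(key string, page, pageSize int) *ServiceStatus

    // Insert records a new result for the given service.
    Insert(service *Service, result *Result)
}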
@@ -48,58 +48,3 @@ func NewServiceStatus(serviceKey, serviceGroup, serviceName string) *ServiceStatus
        Uptime: NewUptime(),
    }
}

// WithResultPagination returns a shallow copy of the ServiceStatus with only the results
// within the range defined by the page and pageSize parameters
func (ss ServiceStatus) WithResultPagination(page, pageSize int) *ServiceStatus {
    shallowCopy := ss
    numberOfResults := len(shallowCopy.Results)
    start := numberOfResults - (page * pageSize)
    end := numberOfResults - ((page - 1) * pageSize)
    if start > numberOfResults {
        start = -1
    } else if start < 0 {
        start = 0
    }
    if end > numberOfResults {
        end = numberOfResults
    }
    if start < 0 || end < 0 {
        shallowCopy.Results = []*Result{}
    } else {
        shallowCopy.Results = shallowCopy.Results[start:end]
    }
    return &shallowCopy
}
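The index arithmetic above pages backward from the end of the slice, so page 1 always holds the pageSize most recent results and pages beyond the oldest result come back empty. A minimal usage sketch, assuming 25 stored results as in TestServiceStatus_WithResultPagination further down (the demo function itself is illustrative, not part of the commit):

package core

import (
    "fmt"
    "time"
)

// demoWithResultPagination illustrates how page and pageSize map onto the Results slice.
func demoWithResultPagination() {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    for i := 0; i < 25; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now()})
    }
    // With 25 results and a page size of 10: page 1 is Results[15:25], page 2 is
    // Results[5:15], page 3 is Results[0:5], and page 4 falls outside the slice.
    fmt.Println(len(serviceStatus.WithResultPagination(1, 10).Results)) // 10
    fmt.Println(len(serviceStatus.WithResultPagination(3, 10).Results)) // 5
    fmt.Println(len(serviceStatus.WithResultPagination(4, 10).Results)) // 0
}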

// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
// no more than MaximumNumberOfResults results in the Results slice
func (ss *ServiceStatus) AddResult(result *Result) {
    if len(ss.Results) > 0 {
        // Check if there's any change since the last result
        // OR there's only 1 event, which only happens when there's a start event
        if ss.Results[len(ss.Results)-1].Success != result.Success || len(ss.Events) == 1 {
            event := &Event{Timestamp: result.Timestamp}
            if result.Success {
                event.Type = EventHealthy
            } else {
                event.Type = EventUnhealthy
            }
            ss.Events = append(ss.Events, event)
            if len(ss.Events) > MaximumNumberOfEvents {
                // Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
                // more than one extra element, we can get rid of all of them at once and thus returning the slice to a
                // length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
                ss.Events = ss.Events[len(ss.Events)-MaximumNumberOfEvents:]
            }
        }
    }
    ss.Results = append(ss.Results, result)
    if len(ss.Results) > MaximumNumberOfResults {
        // Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
        // than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
        // MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
        ss.Results = ss.Results[len(ss.Results)-MaximumNumberOfResults:]
    }
    ss.Uptime.ProcessResult(result)
}
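AddResult therefore enforces two caps: Results never grows beyond MaximumNumberOfResults, and Events, which only gains an entry when the success state flips, never grows beyond MaximumNumberOfEvents. A minimal sketch mirroring TestServiceStatus_AddResult further down (the demo function is illustrative, not part of the commit):

package core

import (
    "fmt"
    "time"
)

// demoAddResultCap illustrates that Results and Events are both capped.
func demoAddResultCap() {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    for i := 0; i < MaximumNumberOfResults+10; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now(), Success: i%2 == 0})
    }
    fmt.Println(len(serviceStatus.Results) == MaximumNumberOfResults) // true
    fmt.Println(len(serviceStatus.Events) <= MaximumNumberOfEvents)   // true
}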
@@ -1,92 +0,0 @@
package core

import (
    "testing"
    "time"
)

var (
    firstCondition  = Condition("[STATUS] == 200")
    secondCondition = Condition("[RESPONSE_TIME] < 500")
    thirdCondition  = Condition("[CERTIFICATE_EXPIRATION] < 72h")

    timestamp = time.Now()

    testService = Service{
        Name: "name",
        Group: "group",
        URL: "https://example.org/what/ever",
        Method: "GET",
        Body: "body",
        Interval: 30 * time.Second,
        Conditions: []*Condition{&firstCondition, &secondCondition, &thirdCondition},
        Alerts: nil,
        Insecure: false,
        NumberOfFailuresInARow: 0,
        NumberOfSuccessesInARow: 0,
    }
    testSuccessfulResult = Result{
        Hostname: "example.org",
        IP: "127.0.0.1",
        HTTPStatus: 200,
        body: []byte("body"),
        Errors: nil,
        Connected: true,
        Success: true,
        Timestamp: timestamp,
        Duration: 150 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success: true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success: true,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success: true,
            },
        },
    }
    testUnsuccessfulResult = Result{
        Hostname: "example.org",
        IP: "127.0.0.1",
        HTTPStatus: 200,
        body: []byte("body"),
        Errors: []string{"error-1", "error-2"},
        Connected: true,
        Success: false,
        Timestamp: timestamp,
        Duration: 750 * time.Millisecond,
        CertificateExpiration: 10 * time.Hour,
        ConditionResults: []*ConditionResult{
            {
                Condition: "[STATUS] == 200",
                Success: true,
            },
            {
                Condition: "[RESPONSE_TIME] < 500",
                Success: false,
            },
            {
                Condition: "[CERTIFICATE_EXPIRATION] < 72h",
                Success: false,
            },
        },
    }
)

func BenchmarkServiceStatus_WithResultPagination(b *testing.B) {
    service := &testService
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    for i := 0; i < MaximumNumberOfResults; i++ {
        serviceStatus.AddResult(&testSuccessfulResult)
    }
    for n := 0; n < b.N; n++ {
        serviceStatus.WithResultPagination(1, 20)
    }
    b.ReportAllocs()
}
@@ -2,7 +2,6 @@ package core

import (
    "testing"
    "time"
)

func TestNewServiceStatus(t *testing.T) {
@@ -18,49 +17,3 @@ func TestNewServiceStatus(t *testing.T) {
        t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
    }
}

func TestServiceStatus_AddResult(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    for i := 0; i < MaximumNumberOfResults+10; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now()})
    }
    if len(serviceStatus.Results) != MaximumNumberOfResults {
        t.Errorf("expected serviceStatus.Results to not exceed a length of %d", MaximumNumberOfResults)
    }
}

func TestServiceStatus_WithResultPagination(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    for i := 0; i < 25; i++ {
        serviceStatus.AddResult(&Result{Timestamp: time.Now()})
    }
    if len(serviceStatus.WithResultPagination(1, 1).Results) != 1 {
        t.Errorf("expected to have 1 result")
    }
    if len(serviceStatus.WithResultPagination(5, 0).Results) != 0 {
        t.Errorf("expected to have 0 results")
    }
    if len(serviceStatus.WithResultPagination(-1, 20).Results) != 0 {
        t.Errorf("expected to have 0 result, because the page was invalid")
    }
    if len(serviceStatus.WithResultPagination(1, -1).Results) != 0 {
        t.Errorf("expected to have 0 result, because the page size was invalid")
    }
    if len(serviceStatus.WithResultPagination(1, 10).Results) != 10 {
        t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
    }
    if len(serviceStatus.WithResultPagination(2, 10).Results) != 10 {
        t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
    }
    if len(serviceStatus.WithResultPagination(3, 10).Results) != 5 {
        t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
    }
    if len(serviceStatus.WithResultPagination(4, 10).Results) != 0 {
        t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
    }
    if len(serviceStatus.WithResultPagination(1, 50).Results) != 25 {
        t.Errorf("expected to have 25 results, because there's only 25 results")
    }
}
core/uptime.go
@@ -1,7 +1,6 @@
package core

import (
    "log"
    "time"
)

@@ -44,109 +43,3 @@ func NewUptime() *Uptime {
        HourlyStatistics: make(map[int64]*HourlyUptimeStatistics),
    }
}

// ProcessResult processes the result by extracting the relevant from the result and recalculating the uptime
|
||||
// if necessary
|
||||
func (uptime *Uptime) ProcessResult(result *Result) {
    // XXX: Remove this on v3.0.0
    if len(uptime.SuccessfulExecutionsPerHour) != 0 || len(uptime.TotalExecutionsPerHour) != 0 {
        uptime.migrateToHourlyStatistics()
    }
    if uptime.HourlyStatistics == nil {
        uptime.HourlyStatistics = make(map[int64]*HourlyUptimeStatistics)
    }
    unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
    hourlyStats, _ := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
    if hourlyStats == nil {
        hourlyStats = &HourlyUptimeStatistics{}
        uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
    }
    if result.Success {
        hourlyStats.SuccessfulExecutions++
    }
    hourlyStats.TotalExecutions++
    hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
    // Clean up only when we're starting to have too many useless keys
    // Note that this is only triggered when there are more entries than there should be after
    // 10 days, despite the fact that we are deleting everything that's older than 7 days.
    // This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
    if len(uptime.HourlyStatistics) > numberOfHoursInTenDays {
        sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
        for hourlyUnixTimestamp := range uptime.HourlyStatistics {
            if sevenDaysAgo > hourlyUnixTimestamp {
                delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
            }
        }
    }
    if result.Success {
        // Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 100%
        // If they're all 100%, then recalculating the uptime would be useless unless
        // the result added was a failure (!result.Success)
        if uptime.LastSevenDays != 1 || uptime.LastTwentyFourHours != 1 || uptime.LastHour != 1 {
            uptime.recalculate()
        }
    } else {
        // Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 0%
        // If they're all 0%, then recalculating the uptime would be useless unless
        // the result added was a success (result.Success)
        if uptime.LastSevenDays != 0 || uptime.LastTwentyFourHours != 0 || uptime.LastHour != 0 {
            uptime.recalculate()
        }
    }
}

func (uptime *Uptime) recalculate() {
    uptimeBrackets := make(map[string]uint64)
    now := time.Now()
    // The oldest uptime bracket starts 7 days ago, so we'll start from there
    timestamp := now.Add(-sevenDays)
    for now.Sub(timestamp) >= 0 {
        hourlyUnixTimestamp := timestamp.Truncate(time.Hour).Unix()
        hourlyStats := uptime.HourlyStatistics[hourlyUnixTimestamp]
        if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
            timestamp = timestamp.Add(time.Hour)
            continue
        }
        uptimeBrackets["7d_success"] += hourlyStats.SuccessfulExecutions
        uptimeBrackets["7d_total"] += hourlyStats.TotalExecutions
        if now.Sub(timestamp) <= 24*time.Hour {
            uptimeBrackets["24h_success"] += hourlyStats.SuccessfulExecutions
            uptimeBrackets["24h_total"] += hourlyStats.TotalExecutions
        }
        if now.Sub(timestamp) <= time.Hour {
            uptimeBrackets["1h_success"] += hourlyStats.SuccessfulExecutions
            uptimeBrackets["1h_total"] += hourlyStats.TotalExecutions
        }
        timestamp = timestamp.Add(time.Hour)
    }
    if uptimeBrackets["7d_total"] > 0 {
        uptime.LastSevenDays = float64(uptimeBrackets["7d_success"]) / float64(uptimeBrackets["7d_total"])
    }
    if uptimeBrackets["24h_total"] > 0 {
        uptime.LastTwentyFourHours = float64(uptimeBrackets["24h_success"]) / float64(uptimeBrackets["24h_total"])
    }
    if uptimeBrackets["1h_total"] > 0 {
        uptime.LastHour = float64(uptimeBrackets["1h_success"]) / float64(uptimeBrackets["1h_total"])
    }
}

// XXX: Remove this on v3.0.0
// Deprecated
func (uptime *Uptime) migrateToHourlyStatistics() {
    log.Println("[migrateToHourlyStatistics] Got", len(uptime.SuccessfulExecutionsPerHour), "entries for successful executions and", len(uptime.TotalExecutionsPerHour), "entries for total executions")
    uptime.HourlyStatistics = make(map[int64]*HourlyUptimeStatistics)
    for hourlyUnixTimestamp, totalExecutions := range uptime.TotalExecutionsPerHour {
        if totalExecutions == 0 {
            log.Println("[migrateToHourlyStatistics] Skipping entry at", hourlyUnixTimestamp, "because total number of executions is 0")
            continue
        }
        uptime.HourlyStatistics[hourlyUnixTimestamp] = &HourlyUptimeStatistics{
            TotalExecutions: totalExecutions,
            SuccessfulExecutions: uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp],
            TotalExecutionsResponseTime: 0,
        }
    }
    log.Println("[migrateToHourlyStatistics] Migrated", len(uptime.HourlyStatistics), "entries")
    uptime.SuccessfulExecutionsPerHour = nil
    uptime.TotalExecutionsPerHour = nil
}
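ProcessResult buckets each result into the hour it occurred in, keyed by the Unix timestamp of that hour, and recalculate derives the 1h/24h/7d ratios from those buckets. A minimal sketch of that behaviour, assuming three results land in the current hour (the demo function is illustrative, not part of the commit):

package core

import (
    "fmt"
    "time"
)

// demoHourlyBucketing illustrates how results processed within the same hour share
// a single HourlyUptimeStatistics entry and how the uptime ratios are read back.
func demoHourlyBucketing() {
    uptime := NewUptime()
    now := time.Now()
    uptime.ProcessResult(&Result{Timestamp: now, Success: true, Duration: 20 * time.Millisecond})
    uptime.ProcessResult(&Result{Timestamp: now, Success: false, Duration: 20 * time.Millisecond})
    uptime.ProcessResult(&Result{Timestamp: now, Success: true, Duration: 20 * time.Millisecond})
    // All three results share the bucket keyed by now.Truncate(time.Hour).Unix()
    stats := uptime.HourlyStatistics[now.Truncate(time.Hour).Unix()]
    fmt.Println(stats.SuccessfulExecutions, stats.TotalExecutions) // 2 3
    fmt.Println(uptime.LastHour)                                   // 2 out of 3, roughly 0.67
}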
@@ -1,24 +0,0 @@
package core

import (
    "testing"
    "time"
)

func BenchmarkUptime_ProcessResult(b *testing.B) {
    uptime := NewUptime()
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12000 days ago
    timestamp := now.Add(-12000 * 24 * time.Hour)
    for n := 0; n < b.N; n++ {
        uptime.ProcessResult(&Result{
            Duration: 18 * time.Millisecond,
            Success: n%15 == 0,
            Timestamp: timestamp,
        })
        // Simulate service with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
    b.ReportAllocs()
}
@@ -1,96 +0,0 @@
package core

import (
    "testing"
    "time"
)

func TestUptime_ProcessResult(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    uptime := serviceStatus.Uptime

    checkUptimes(t, serviceStatus, 0.00, 0.00, 0.00)

    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    uptime.ProcessResult(&Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})
    checkUptimes(t, serviceStatus, 1.00, 0.00, 0.00)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})
    checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})
    checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-12 * time.Hour), Success: true})
    checkUptimes(t, serviceStatus, 0.75, 1.00, 0.00)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
    uptime.ProcessResult(&Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
    uptime.ProcessResult(&Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
    checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-10 * time.Minute), Success: false})
    checkUptimes(t, serviceStatus, 0.50, 0.50, 0.25)

    uptime.ProcessResult(&Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
    uptime.ProcessResult(&Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
    checkUptimes(t, serviceStatus, 0.75, 0.70, 0.50)
}

func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
    service := &Service{Name: "name", Group: "group"}
    serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
    now := time.Now()
    now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
    // Start 12 days ago
    timestamp := now.Add(-12 * 24 * time.Hour)
    for timestamp.Unix() <= now.Unix() {
        serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
        if len(serviceStatus.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
            t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(serviceStatus.Uptime.HourlyStatistics))
        }
        if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
            t.Error("most recent timestamp > 1h ago, expected serviceStatus.Uptime.LastHour to be 0, got", serviceStatus.Uptime.LastHour)
        }
        if now.Sub(timestamp) < time.Hour && serviceStatus.Uptime.LastHour == 0 {
            t.Error("most recent timestamp < 1h ago, expected serviceStatus.Uptime.LastHour to NOT be 0, got", serviceStatus.Uptime.LastHour)
        }
        // Simulate service with an interval of 3 minutes
        timestamp = timestamp.Add(3 * time.Minute)
    }
}

func checkUptimes(t *testing.T, status *ServiceStatus, expectedUptimeDuringLastSevenDays, expectedUptimeDuringLastTwentyFourHours, expectedUptimeDuringLastHour float64) {
    if status.Uptime.LastSevenDays != expectedUptimeDuringLastSevenDays {
        t.Errorf("expected status.Uptime.LastSevenDays to be %f, got %f", expectedUptimeDuringLastSevenDays, status.Uptime.LastSevenDays)
    }
    if status.Uptime.LastTwentyFourHours != expectedUptimeDuringLastTwentyFourHours {
        t.Errorf("expected status.Uptime.LastTwentyFourHours to be %f, got %f", expectedUptimeDuringLastTwentyFourHours, status.Uptime.LastTwentyFourHours)
    }
    if status.Uptime.LastHour != expectedUptimeDuringLastHour {
        t.Errorf("expected status.Uptime.LastHour to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastHour)
    }
}

func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
    if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
        t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
    }
    if hourlyUptimeStatistics.TotalExecutions != expectedTotalExecutions {
        t.Error("TotalExecutions should've been", expectedTotalExecutions, "got", hourlyUptimeStatistics.TotalExecutions)
    }
    if hourlyUptimeStatistics.SuccessfulExecutions != expectedSuccessfulExecutions {
        t.Error("SuccessfulExecutions should've been", expectedSuccessfulExecutions, "got", hourlyUptimeStatistics.SuccessfulExecutions)
    }
}