Implement persistence

parent 9196f57487
commit 79bef8d391
.github/workflows/build.yml (vendored, 6 changes)

@@ -24,9 +24,9 @@ jobs:
       run: go build -mod vendor
     - name: Test
       # We're using "sudo" because one of the tests leverages ping, which requires super-user privileges.
-      # As for the "PATH=$PATH", we need it to use the same "go" executable that was configured by the "Set
-      # up Go" step (otherwise, it'd use sudo's "go" executable)
-      run: sudo "PATH=$PATH" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
+      # As for the 'env "PATH=$PATH" "GOROOT=$GOROOT"', we need it to use the same "go" executable that
+      # was configured by the "Set up Go 1.15" step (otherwise, it'd use sudo's "go" executable)
+      run: sudo env "PATH=$PATH" "GOROOT=$GOROOT" go test -mod vendor ./... -race -coverprofile=coverage.txt -covermode=atomic
     - name: Codecov
       uses: codecov/codecov-action@v1.0.14
       with:
README.md

@@ -100,6 +100,8 @@ Note that you can also add environment variables in the configuration file (i.e.
 |:---------------------------------------- |:----------------------------------------------------------------------------- |:-------------- |
 | `debug`                                   | Whether to enable debug logs                                                    | `false`         |
 | `metrics`                                 | Whether to expose metrics at /metrics                                           | `false`         |
+| `storage`                                 | Storage configuration                                                           | `{}`            |
+| `storage.file`                            | File to persist the data in. If not set, storage is in-memory only.             | `""`            |
 | `services`                                | List of services to monitor                                                     | Required `[]`   |
 | `services[].name`                         | Name of the service. Can be anything.                                           | Required `""`   |
 | `services[].group`                        | Group name. Used to group multiple services together on the dashboard. See [Service groups](#service-groups). | `""` |
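To illustrate the two new keys, here is a sketch in the same style as the configuration tests further down in this commit; the test name and the temporary-file path are placeholders, and `parseAndValidateConfigBytes` is the package-internal helper those tests exercise:

```go
func TestParseAndValidateConfigBytesWithStorageFile(t *testing.T) {
	file := t.TempDir() + "/status.db" // placeholder path
	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
storage:
  file: %s
services:
  - name: twinnation
    url: https://twinnation.org/health
    conditions:
      - "[STATUS] == 200"
`, file)))
	if err != nil {
		t.Error("expected no error, got", err.Error())
	}
	// When storage.file is set, service statuses are persisted to that file;
	// when it is left empty, storage stays in-memory only.
	if config.Storage == nil || config.Storage.File != file {
		t.Error("expected storage.file to be picked up from the configuration")
	}
}
```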
config/config.go

@@ -11,6 +11,8 @@ import (
 	"github.com/TwinProduction/gatus/core"
 	"github.com/TwinProduction/gatus/k8s"
 	"github.com/TwinProduction/gatus/security"
+	"github.com/TwinProduction/gatus/storage"
+	"github.com/TwinProduction/gatus/util"
 	"gopkg.in/yaml.v2"
 )
 
@@ -71,6 +73,9 @@ type Config struct {
 	// Kubernetes is the Kubernetes configuration
 	Kubernetes *k8s.Config `yaml:"kubernetes"`
 
+	// Storage is the configuration for how the data is stored
+	Storage *storage.Config `yaml:"storage"`
+
 	// Web is the configuration for the web listener
 	Web *WebConfig `yaml:"web"`
 }
@@ -144,10 +149,30 @@ func parseAndValidateConfigBytes(yamlBytes []byte) (config *Config, err error) {
 		validateServicesConfig(config)
 		validateKubernetesConfig(config)
 		validateWebConfig(config)
+		validateStorageConfig(config)
 	}
 	return
 }
 
+func validateStorageConfig(config *Config) {
+	if config.Storage == nil {
+		config.Storage = &storage.Config{}
+	}
+	err := storage.Initialize(config.Storage)
+	if err != nil {
+		panic(err)
+	}
+	// Remove all ServiceStatus that represent services which no longer exist in the configuration
+	var keys []string
+	for _, service := range config.Services {
+		keys = append(keys, util.ConvertGroupAndServiceToKey(service.Group, service.Name))
+	}
+	numberOfServiceStatusesDeleted := storage.Get().DeleteAllServiceStatusesNotInKeys(keys)
+	if numberOfServiceStatusesDeleted > 0 {
+		log.Printf("[config][validateStorageConfig] Deleted %d service statuses because their matching services no longer existed", numberOfServiceStatusesDeleted)
+	}
+}
+
 func validateWebConfig(config *Config) {
 	if config.Web == nil {
 		config.Web = &WebConfig{Address: DefaultAddress, Port: DefaultPort}
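The pruning in validateStorageConfig keys everything by group and name. A minimal, self-contained sketch of what that amounts to, assuming the `"group_name"` key format implied by the store tests (`fmt.Sprintf("%s_%s", group, name)`) and the `core_frontend`/`core_backend` services used elsewhere in this change:

```go
package main

import (
	"log"

	"github.com/TwinProduction/gatus/storage"
	"github.com/TwinProduction/gatus/util"
)

func main() {
	// Keys for the services that are still configured; with the key format used
	// by the tests (group + "_" + name), these are presumably "core_frontend"
	// and "core_backend".
	keys := []string{
		util.ConvertGroupAndServiceToKey("core", "frontend"),
		util.ConvertGroupAndServiceToKey("core", "backend"),
	}
	// Drop every persisted ServiceStatus whose key is no longer in that set.
	deleted := storage.Get().DeleteAllServiceStatusesNotInKeys(keys)
	log.Printf("pruned %d stale service statuses", deleted)
}
```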
config/config_test.go

@@ -42,7 +42,10 @@ func TestLoadDefaultConfigurationFile(t *testing.T) {
 }
 
 func TestParseAndValidateConfigBytes(t *testing.T) {
-	config, err := parseAndValidateConfigBytes([]byte(`
+	file := t.TempDir() + "/test.db"
+	config, err := parseAndValidateConfigBytes([]byte(fmt.Sprintf(`
+storage:
+  file: %s
 services:
   - name: twinnation
     url: https://twinnation.org/health
@@ -54,9 +57,9 @@ services:
     conditions:
       - "[STATUS] != 400"
       - "[STATUS] != 500"
-`))
+`, file)))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -99,7 +102,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -132,7 +135,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -167,7 +170,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -201,7 +204,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -250,7 +253,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -288,7 +291,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -332,7 +335,6 @@ badconfig:
 func TestParseAndValidateConfigBytesWithAlerting(t *testing.T) {
 	config, err := parseAndValidateConfigBytes([]byte(`
 debug: true
-
 alerting:
   slack:
     webhook-url: "http://example.com"
@@ -359,7 +361,7 @@ services:
       - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -452,7 +454,7 @@ services:
      - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -486,7 +488,7 @@ services:
      - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -525,7 +527,7 @@ services:
      - "[STATUS] == 200"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -578,7 +580,7 @@ services:
      - "[STATUS] == 200"
 `, expectedUsername, expectedPasswordHash)))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
@@ -645,7 +647,7 @@ kubernetes:
     target-path: "/health"
 `))
 	if err != nil {
-		t.Error("No error should've been returned")
+		t.Error("expected no error, got", err.Error())
 	}
 	if config == nil {
 		t.Fatal("Config shouldn't have been nil")
controller/controller.go

@@ -55,7 +55,7 @@ func Handle() {
 		WriteTimeout: 15 * time.Second,
 		IdleTimeout:  15 * time.Second,
 	}
-	log.Println("[controller][Handle] Listening on" + cfg.Web.SocketAddress())
+	log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress())
 	if os.Getenv("ROUTER_TEST") == "true" {
 		return
 	}
@@ -140,10 +140,11 @@ func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
 	}
 	data := map[string]interface{}{
 		"serviceStatus": serviceStatus,
-		// This is my lazy way of exposing events even though they're not visible from the json annotation
-		// present in ServiceStatus. We do this because creating a separate object for each endpoints
-		// would be wasteful (one with and one without Events)
+		// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
+		// expose only the necessary data on /api/v1/statuses.
+		// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
 		"events": serviceStatus.Events,
+		"uptime": serviceStatus.Uptime,
 	}
 	output, err := json.Marshal(data)
 	if err != nil {
controller/controller_test.go

@@ -32,97 +32,97 @@ func TestCreateRouter(t *testing.T) {
 	watchdog.UpdateServiceStatuses(cfg.Services[1], &core.Result{Success: false, Duration: time.Second, Timestamp: time.Now()})
 	router := CreateRouter(cfg)
 	type Scenario struct {
-		Description  string
+		Name         string
 		Path         string
 		ExpectedCode int
 		Gzip         bool
 	}
 	scenarios := []Scenario{
 		{
-			Description:  "health",
+			Name:         "health",
 			Path:         "/health",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "metrics",
+			Name:         "metrics",
 			Path:         "/metrics",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "badges-1h",
+			Name:         "badges-1h",
 			Path:         "/api/v1/badges/uptime/1h/core_frontend.svg",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "badges-24h",
+			Name:         "badges-24h",
 			Path:         "/api/v1/badges/uptime/24h/core_backend.svg",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "badges-7d",
+			Name:         "badges-7d",
 			Path:         "/api/v1/badges/uptime/7d/core_frontend.svg",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "badges-with-invalid-duration",
+			Name:         "badges-with-invalid-duration",
 			Path:         "/api/v1/badges/uptime/3d/core_backend.svg",
 			ExpectedCode: http.StatusBadRequest,
 		},
 		{
-			Description:  "badges-for-invalid-key",
+			Name:         "badges-for-invalid-key",
 			Path:         "/api/v1/badges/uptime/7d/invalid_key.svg",
 			ExpectedCode: http.StatusNotFound,
 		},
 		{
-			Description:  "service-statuses",
+			Name:         "service-statuses",
 			Path:         "/api/v1/statuses",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "service-statuses-gzip",
+			Name:         "service-statuses-gzip",
 			Path:         "/api/v1/statuses",
 			ExpectedCode: http.StatusOK,
 			Gzip:         true,
 		},
 		{
-			Description:  "service-status",
+			Name:         "service-status",
 			Path:         "/api/v1/statuses/core_frontend",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "service-status-gzip",
+			Name:         "service-status-gzip",
 			Path:         "/api/v1/statuses/core_frontend",
 			ExpectedCode: http.StatusOK,
 			Gzip:         true,
 		},
 		{
-			Description:  "service-status-for-invalid-key",
+			Name:         "service-status-for-invalid-key",
 			Path:         "/api/v1/statuses/invalid_key",
 			ExpectedCode: http.StatusNotFound,
 		},
 		{
-			Description:  "favicon",
+			Name:         "favicon",
 			Path:         "/favicon.ico",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "frontend-home",
+			Name:         "frontend-home",
 			Path:         "/",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "frontend-assets",
+			Name:         "frontend-assets",
 			Path:         "/js/app.js",
 			ExpectedCode: http.StatusOK,
 		},
 		{
-			Description:  "frontend-service",
+			Name:         "frontend-service",
 			Path:         "/services/core_frontend",
 			ExpectedCode: http.StatusOK,
 		},
 	}
 	for _, scenario := range scenarios {
-		t.Run(scenario.Description, func(t *testing.T) {
+		t.Run(scenario.Name, func(t *testing.T) {
 			request, _ := http.NewRequest("GET", scenario.Path, nil)
 			if scenario.Gzip {
 				request.Header.Set("Accept-Encoding", "gzip")
core/service-status.go

@@ -22,13 +22,17 @@ type ServiceStatus struct {
 
 	// Events is a list of events
 	//
-	// We don't expose this through JSON, because the main dashboard doesn't need to have these events.
+	// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
 	// However, the detailed service page does leverage this by including it to a map that will be
 	// marshalled alongside the ServiceStatus.
 	Events []*Event `json:"-"`
 
 	// Uptime information on the service's uptime
-	Uptime *Uptime `json:"uptime"`
+	//
+	// We don't expose this through JSON, because the main dashboard doesn't need to have this data.
+	// However, the detailed service page does leverage this by including it to a map that will be
+	// marshalled alongside the ServiceStatus.
+	Uptime *Uptime `json:"-"`
 }
 
 // NewServiceStatus creates a new ServiceStatus
core/uptime.go

@@ -25,15 +25,20 @@ type Uptime struct {
 	// LastHour is the uptime percentage over the past hour
 	LastHour float64 `json:"1h"`
 
-	successCountPerHour map[string]uint64
-	totalCountPerHour   map[string]uint64
+	// SuccessCountPerHour is a map containing the number of successes per hour, per timestamp following the
+	// custom RFC3339WithoutMinutesAndSeconds format
+	SuccessCountPerHour map[string]uint64 `json:"-"`
+
+	// TotalCountPerHour is a map containing the total number of checks per hour, per timestamp following the
+	// custom RFC3339WithoutMinutesAndSeconds format
+	TotalCountPerHour map[string]uint64 `json:"-"`
 }
 
 // NewUptime creates a new Uptime
 func NewUptime() *Uptime {
 	return &Uptime{
-		successCountPerHour: make(map[string]uint64),
-		totalCountPerHour:   make(map[string]uint64),
+		SuccessCountPerHour: make(map[string]uint64),
+		TotalCountPerHour:   make(map[string]uint64),
 	}
 }
@@ -42,16 +47,16 @@ func NewUptime() *Uptime {
 func (uptime *Uptime) ProcessResult(result *Result) {
 	timestampDateWithHour := result.Timestamp.Format(RFC3339WithoutMinutesAndSeconds)
 	if result.Success {
-		uptime.successCountPerHour[timestampDateWithHour]++
+		uptime.SuccessCountPerHour[timestampDateWithHour]++
 	}
-	uptime.totalCountPerHour[timestampDateWithHour]++
+	uptime.TotalCountPerHour[timestampDateWithHour]++
 	// Clean up only when we're starting to have too many useless keys
 	// Note that this is only triggered when there are more entries than there should be after
 	// 10 days, despite the fact that we are deleting everything that's older than 7 days.
 	// This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
-	if len(uptime.totalCountPerHour) > numberOfHoursInTenDays {
+	if len(uptime.TotalCountPerHour) > numberOfHoursInTenDays {
 		sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour))
-		for k := range uptime.totalCountPerHour {
+		for k := range uptime.TotalCountPerHour {
 			dateWithHour, err := time.Parse(time.RFC3339, k)
 			if err != nil {
 				// This shouldn't happen, but we'll log it in case it does happen
@@ -59,8 +64,8 @@ func (uptime *Uptime) ProcessResult(result *Result) {
 				continue
 			}
 			if sevenDaysAgo.Unix() > dateWithHour.Unix() {
-				delete(uptime.totalCountPerHour, k)
-				delete(uptime.successCountPerHour, k)
+				delete(uptime.TotalCountPerHour, k)
+				delete(uptime.SuccessCountPerHour, k)
 			}
 		}
 	}
@@ -88,8 +93,8 @@ func (uptime *Uptime) recalculate() {
 	timestamp := now.Add(-sevenDays)
 	for now.Sub(timestamp) >= 0 {
 		timestampDateWithHour := timestamp.Format(RFC3339WithoutMinutesAndSeconds)
-		successCountForTimestamp := uptime.successCountPerHour[timestampDateWithHour]
-		totalCountForTimestamp := uptime.totalCountPerHour[timestampDateWithHour]
+		successCountForTimestamp := uptime.SuccessCountPerHour[timestampDateWithHour]
+		totalCountForTimestamp := uptime.TotalCountPerHour[timestampDateWithHour]
 		uptimeBrackets["7d_success"] += successCountForTimestamp
 		uptimeBrackets["7d_total"] += totalCountForTimestamp
 		if now.Sub(timestamp) <= 24*time.Hour {

@@ -51,8 +51,8 @@ func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
 	timestamp := now.Add(-12 * 24 * time.Hour)
 	for timestamp.Unix() <= now.Unix() {
 		serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
-		if len(serviceStatus.Uptime.successCountPerHour) > numberOfHoursInTenDays {
-			t.Errorf("At no point in time should there be more than %d entries in serviceStatus.successCountPerHour", numberOfHoursInTenDays)
+		if len(serviceStatus.Uptime.SuccessCountPerHour) > numberOfHoursInTenDays {
+			t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessCountPerHour", numberOfHoursInTenDays)
 		}
 		//fmt.Printf("timestamp=%s; uptimeDuringLastHour=%f; timeAgo=%s\n", timestamp.Format(time.RFC3339), serviceStatus.UptimeDuringLastHour, time.Since(timestamp))
 		if now.Sub(timestamp) > time.Hour && serviceStatus.Uptime.LastHour != 0 {
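Exporting these counters is what makes the new file-backed persistence work: `encoding/gob`, which the memory store below relies on, silently ignores unexported struct fields, while the `json:"-"` tag keeps them out of the API responses. A minimal, self-contained sketch of that behaviour (the type and the timestamp key are illustrative only):

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

// uptimeCounters mimics the shape of core.Uptime's per-hour maps.
type uptimeCounters struct {
	SuccessCountPerHour map[string]uint64 `json:"-"` // exported: gob encodes it, JSON ignores it
	TotalCountPerHour   map[string]uint64 `json:"-"`
}

func main() {
	in := uptimeCounters{
		SuccessCountPerHour: map[string]uint64{"2020-12-25T01": 3},
		TotalCountPerHour:   map[string]uint64{"2020-12-25T01": 4},
	}
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(in); err != nil {
		panic(err)
	}
	var out uptimeCounters
	if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	// Both counters survive the round trip; with lowercase (unexported) field
	// names they would be silently dropped by gob.
	fmt.Println(out.SuccessCountPerHour["2020-12-25T01"], out.TotalCountPerHour["2020-12-25T01"])
}
```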
go.mod (2 changes)

@@ -4,7 +4,7 @@ go 1.15
 
 require (
 	cloud.google.com/go v0.74.0 // indirect
-	github.com/TwinProduction/gocache v1.1.0
+	github.com/TwinProduction/gocache v1.2.0
 	github.com/go-ping/ping v0.0.0-20201115131931-3300c582a663
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/gorilla/mux v1.8.0
go.sum (4 changes)

@@ -50,8 +50,8 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
 github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
-github.com/TwinProduction/gocache v1.1.0 h1:mibBUyccd8kGHlm5dXhTMDOvWBK4mjNqGyOOkG8mib8=
-github.com/TwinProduction/gocache v1.1.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y=
+github.com/TwinProduction/gocache v1.2.0 h1:iZBUuri5VydxYhNkWEOZm/JzX/X2b3OZzfLrPaRWKjk=
+github.com/TwinProduction/gocache v1.2.0/go.mod h1:+qH57V/K4oAcX9C7CvgJTwUX4lzfIUXQC/6XaRSOS1Y=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
storage/config.go (new file, 7 lines)

@@ -0,0 +1,7 @@
+package storage
+
+// Config is the configuration for alerting providers
+type Config struct {
+	// File is the path of the file to use when using file.Store
+	File string `yaml:"file"`
+}
(deleted file — the previous in-memory store in package storage, 69 lines)

@@ -1,69 +0,0 @@
-package storage
-
-import (
-	"encoding/json"
-	"sync"
-
-	"github.com/TwinProduction/gatus/core"
-	"github.com/TwinProduction/gatus/util"
-)
-
-// InMemoryStore implements an in-memory store
-type InMemoryStore struct {
-	serviceStatuses     map[string]*core.ServiceStatus
-	serviceResultsMutex sync.RWMutex
-}
-
-// NewInMemoryStore returns an in-memory store. Note that the store acts as a singleton, so although new-ing
-// up in-memory stores will give you a unique reference to a struct each time, all structs returned
-// by this function will act on the same in-memory store.
-func NewInMemoryStore() *InMemoryStore {
-	return &InMemoryStore{
-		serviceStatuses: make(map[string]*core.ServiceStatus),
-	}
-}
-
-// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
-func (ims *InMemoryStore) GetAllAsJSON() ([]byte, error) {
-	ims.serviceResultsMutex.RLock()
-	serviceStatuses, err := json.Marshal(ims.serviceStatuses)
-	ims.serviceResultsMutex.RUnlock()
-	return serviceStatuses, err
-}
-
-// GetServiceStatus returns the service status for a given service name in the given group
-func (ims *InMemoryStore) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
-	key := util.ConvertGroupAndServiceToKey(groupName, serviceName)
-	ims.serviceResultsMutex.RLock()
-	serviceStatus := ims.serviceStatuses[key]
-	ims.serviceResultsMutex.RUnlock()
-	return serviceStatus
-}
-
-// GetServiceStatusByKey returns the service status for a given key
-func (ims *InMemoryStore) GetServiceStatusByKey(key string) *core.ServiceStatus {
-	ims.serviceResultsMutex.RLock()
-	serviceStatus := ims.serviceStatuses[key]
-	ims.serviceResultsMutex.RUnlock()
-	return serviceStatus
-}
-
-// Insert inserts the observed result for the specified service into the in memory store
-func (ims *InMemoryStore) Insert(service *core.Service, result *core.Result) {
-	key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
-	ims.serviceResultsMutex.Lock()
-	serviceStatus, exists := ims.serviceStatuses[key]
-	if !exists {
-		serviceStatus = core.NewServiceStatus(service)
-		ims.serviceStatuses[key] = serviceStatus
-	}
-	serviceStatus.AddResult(result)
-	ims.serviceResultsMutex.Unlock()
-}
-
-// Clear will empty all the results from the in memory store
-func (ims *InMemoryStore) Clear() {
-	ims.serviceResultsMutex.Lock()
-	ims.serviceStatuses = make(map[string]*core.ServiceStatus)
-	ims.serviceResultsMutex.Unlock()
-}
storage/storage.go (new file, 48 lines)

@@ -0,0 +1,48 @@
+package storage
+
+import (
+	"log"
+	"time"
+
+	"github.com/TwinProduction/gatus/storage/store"
+	"github.com/TwinProduction/gatus/storage/store/memory"
+)
+
+var (
+	provider store.Store
+
+	// initialized keeps track of whether the storage provider was initialized
+	// Because store.Store is an interface, a nil check wouldn't be sufficient, so instead of doing reflection
+	// every single time Get is called, we'll just lazily keep track of its existence through this variable
+	initialized bool
+)
+
+// Get retrieves the storage provider
+func Get() store.Store {
+	if !initialized {
+		log.Println("[storage][Get] Provider requested before it was initialized, automatically initializing")
+		err := Initialize(nil)
+		if err != nil {
+			panic("failed to automatically initialize store: " + err.Error())
+		}
+	}
+	return provider
+}
+
+// Initialize instantiates the storage provider based on the Config provider
+func Initialize(cfg *Config) error {
+	initialized = true
+	var err error
+	if cfg == nil || len(cfg.File) == 0 {
+		log.Println("[storage][Initialize] Creating storage provider")
+		provider, err = memory.NewStore("")
+	} else {
+		log.Printf("[storage][Initialize] Creating storage provider with file=%s", cfg.File)
+		provider, err = memory.NewStore(cfg.File)
+		if err != nil {
+			return err
+		}
+		go provider.(*memory.Store).AutoSave(7 * time.Minute)
+	}
+	return nil
+}
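In other words, callers never have to worry about ordering: `Get()` lazily falls back to a purely in-memory store when `Initialize` was never called, and only a file-backed configuration starts the background `AutoSave` loop. A small usage sketch, where the service and result values are placeholders borrowed from the test fixtures:

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage"
)

func main() {
	// No Initialize call here: Get() logs a warning and creates an in-memory store on the fly.
	s := storage.Get()

	// Placeholder service and result, similar to the fixtures used in the store benchmarks.
	service := core.Service{Name: "frontend", Group: "core"}
	result := core.Result{Success: true}
	s.Insert(&service, &result)

	data, err := s.GetAllAsJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```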
storage/store/memory/memory.go (new file, 110 lines)

@@ -0,0 +1,110 @@
+package memory
+
+import (
+	"encoding/gob"
+	"encoding/json"
+	"log"
+	"time"
+
+	"github.com/TwinProduction/gatus/core"
+	"github.com/TwinProduction/gatus/util"
+	"github.com/TwinProduction/gocache"
+)
+
+func init() {
+	gob.Register(&core.ServiceStatus{})
+	gob.Register(&core.Uptime{})
+	gob.Register(&core.Result{})
+	gob.Register(&core.Event{})
+}
+
+// Store that leverages gocache
+type Store struct {
+	file  string
+	cache *gocache.Cache
+}
+
+// NewStore creates a new store
+func NewStore(file string) (*Store, error) {
+	store := &Store{
+		file:  file,
+		cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
+	}
+	if len(file) > 0 {
+		_, err := store.cache.ReadFromFile(file)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return store, nil
+}
+
+// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
+func (s *Store) GetAllAsJSON() ([]byte, error) {
+	return json.Marshal(s.cache.GetAll())
+}
+
+// GetServiceStatus returns the service status for a given service name in the given group
+func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
+	return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
+}
+
+// GetServiceStatusByKey returns the service status for a given key
+func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
+	serviceStatus := s.cache.GetValue(key)
+	if serviceStatus == nil {
+		return nil
+	}
+	return serviceStatus.(*core.ServiceStatus)
+}
+
+// Insert adds the observed result for the specified service into the store
+func (s *Store) Insert(service *core.Service, result *core.Result) {
+	key := util.ConvertGroupAndServiceToKey(service.Group, service.Name)
+	serviceStatus, exists := s.cache.Get(key)
+	if !exists {
+		serviceStatus = core.NewServiceStatus(service)
+	}
+	serviceStatus.(*core.ServiceStatus).AddResult(result)
+	s.cache.Set(key, serviceStatus)
+}
+
+// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
+func (s *Store) DeleteAllServiceStatusesNotInKeys(keys []string) int {
+	var keysToDelete []string
+	for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
+		shouldDelete := true
+		for _, key := range keys {
+			if existingKey == key {
+				shouldDelete = false
+				break
+			}
+		}
+		if shouldDelete {
+			keysToDelete = append(keysToDelete, existingKey)
+		}
+	}
+	return s.cache.DeleteAll(keysToDelete)
+}
+
+// Clear deletes everything from the store
+func (s *Store) Clear() {
+	s.cache.Clear()
+}
+
+// Save persists the cache to the store file
+func (s *Store) Save() error {
+	return s.cache.SaveToFile(s.file)
+}
+
+// AutoSave automatically calls the Save function at every interval
+func (s *Store) AutoSave(interval time.Duration) {
+	for {
+		time.Sleep(interval)
+		log.Printf("[memory][AutoSave] Persisting data to file")
+		err := s.Save()
+		if err != nil {
+			log.Printf("[memory][AutoSave] failed to save to file=%s: %s", s.file, err.Error())
+		}
+	}
+}
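Persistence, then, is just gocache's snapshot file: `Save` serializes the whole cache, `ReadFromFile` restores it on the next start, and `AutoSave` snapshots every few minutes. A sketch of the round trip using only the methods above (the temp path is a placeholder):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/storage/store/memory"
)

func main() {
	file := filepath.Join(os.TempDir(), "gatus-example.db") // placeholder path

	// First "run": insert a result and snapshot it to disk.
	first, err := memory.NewStore(file)
	if err != nil {
		panic(err)
	}
	service := core.Service{Name: "frontend", Group: "core"}
	first.Insert(&service, &core.Result{Success: true})
	if err := first.Save(); err != nil {
		panic(err)
	}

	// Second "run": a brand new store reads the snapshot back from the same file.
	second, err := memory.NewStore(file)
	if err != nil {
		panic(err)
	}
	status := second.GetServiceStatus("core", "frontend")
	fmt.Println(status != nil) // true — the ServiceStatus survived the restart
}
```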
storage/store/memory/memory_test.go (moved from the old storage package)

@@ -1,4 +1,4 @@
-package storage
+package memory
 
 import (
 	"fmt"
@@ -83,17 +83,17 @@ var (
 	}
 )
 
-func TestInMemoryStore_Insert(t *testing.T) {
-	store := NewInMemoryStore()
+func TestStore_Insert(t *testing.T) {
+	store, _ := NewStore("")
 	store.Insert(&testService, &testSuccessfulResult)
 	store.Insert(&testService, &testUnsuccessfulResult)
 
-	if len(store.serviceStatuses) != 1 {
-		t.Fatalf("expected 1 ServiceStatus, got %d", len(store.serviceStatuses))
+	if store.cache.Count() != 1 {
+		t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
 	}
 	key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
-	serviceStatus, exists := store.serviceStatuses[key]
-	if !exists {
+	serviceStatus := store.GetServiceStatusByKey(key)
+	if serviceStatus == nil {
 		t.Fatalf("Store should've had key '%s', but didn't", key)
 	}
 	if len(serviceStatus.Results) != 2 {
@@ -140,8 +140,8 @@ func TestInMemoryStore_Insert(t *testing.T) {
 	}
 }
 
-func TestInMemoryStore_GetServiceStatus(t *testing.T) {
-	store := NewInMemoryStore()
+func TestStore_GetServiceStatus(t *testing.T) {
+	store, _ := NewStore("")
 	store.Insert(&testService, &testSuccessfulResult)
 	store.Insert(&testService, &testUnsuccessfulResult)
 
@@ -163,8 +163,8 @@ func TestInMemoryStore_GetServiceStatus(t *testing.T) {
 	}
 }
 
-func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
-	store := NewInMemoryStore()
+func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
+	store, _ := NewStore("")
 	store.Insert(&testService, &testSuccessfulResult)
 
 	serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
@@ -181,8 +181,8 @@ func TestInMemoryStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T)
 	}
 }
 
-func TestInMemoryStore_GetServiceStatusByKey(t *testing.T) {
-	store := NewInMemoryStore()
+func TestStore_GetServiceStatusByKey(t *testing.T) {
+	store, _ := NewStore("")
 	store.Insert(&testService, &testSuccessfulResult)
 	store.Insert(&testService, &testUnsuccessfulResult)
 
@@ -204,8 +204,8 @@ func TestInMemoryStore_GetServiceStatusByKey(t *testing.T) {
 	}
 }
 
-func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
-	store := NewInMemoryStore()
+func TestStore_GetAllAsJSON(t *testing.T) {
+	store, _ := NewStore("")
 	firstResult := &testSuccessfulResult
 	secondResult := &testUnsuccessfulResult
 	store.Insert(&testService, firstResult)
@@ -217,8 +217,36 @@ func TestInMemoryStore_GetAllAsJSON(t *testing.T) {
 	if err != nil {
 		t.Fatal("shouldn't have returned an error, got", err.Error())
 	}
-	expectedOutput := `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}],"uptime":{"7d":0.5,"24h":0.5,"1h":0.5}}}`
+	expectedOutput := `{"group_name":{"name":"name","group":"group","key":"group_name","results":[{"status":200,"hostname":"example.org","duration":150000000,"errors":null,"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":true},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":true}],"success":true,"timestamp":"0001-01-01T00:00:00Z"},{"status":200,"hostname":"example.org","duration":750000000,"errors":["error-1","error-2"],"conditionResults":[{"condition":"[STATUS] == 200","success":true},{"condition":"[RESPONSE_TIME] \u003c 500","success":false},{"condition":"[CERTIFICATE_EXPIRATION] \u003c 72h","success":false}],"success":false,"timestamp":"0001-01-01T00:00:00Z"}]}}`
 	if string(output) != expectedOutput {
 		t.Errorf("expected:\n %s\n\ngot:\n %s", expectedOutput, string(output))
 	}
 }
+
+func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
+	store, _ := NewStore("")
+	firstService := core.Service{Name: "service-1", Group: "group"}
+	secondService := core.Service{Name: "service-2", Group: "group"}
+	result := &testSuccessfulResult
+	store.Insert(&firstService, result)
+	store.Insert(&secondService, result)
+	if store.cache.Count() != 2 {
+		t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
+	}
+	if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
+		t.Fatal("firstService should exist")
+	}
+	if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) == nil {
+		t.Fatal("secondService should exist")
+	}
+	store.DeleteAllServiceStatusesNotInKeys([]string{util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)})
+	if store.cache.Count() != 1 {
+		t.Fatalf("expected cache to have 1 keys, got %d", store.cache.Count())
+	}
+	if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(firstService.Group, firstService.Name)) == nil {
+		t.Error("secondService should've been deleted")
+	}
+	if store.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(secondService.Group, secondService.Name)) != nil {
+		t.Error("firstService should still exist")
+	}
+}
storage/store/store.go (new file, 34 lines)

@@ -0,0 +1,34 @@
+package store
+
+import (
+	"github.com/TwinProduction/gatus/core"
+	"github.com/TwinProduction/gatus/storage/store/memory"
+)
+
+// Store is the interface that each stores should implement
+type Store interface {
+	// GetAllAsJSON returns the JSON encoding of all monitored core.ServiceStatus
+	GetAllAsJSON() ([]byte, error)
+
+	// GetServiceStatus returns the service status for a given service name in the given group
+	GetServiceStatus(groupName, serviceName string) *core.ServiceStatus
+
+	// GetServiceStatusByKey returns the service status for a given key
+	GetServiceStatusByKey(key string) *core.ServiceStatus
+
+	// Insert adds the observed result for the specified service into the store
+	Insert(service *core.Service, result *core.Result)
+
+	// DeleteAllServiceStatusesNotInKeys removes all ServiceStatus that are not within the keys provided
+	//
+	// Used to delete services that have been persisted but are no longer part of the configured services
+	DeleteAllServiceStatusesNotInKeys(keys []string) int
+
+	// Clear deletes everything from the store
+	Clear()
+}
+
+var (
+	// Validate interface implementation on compile
+	_ Store = (*memory.Store)(nil)
+)
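The compile-time assertion at the bottom is the extension point: any future backend only needs to satisfy this interface to become usable by the `storage` package. A hedged sketch of what a second implementation's skeleton could look like — `noopStore` is purely hypothetical and not part of this change:

```go
package store

import "github.com/TwinProduction/gatus/core"

// noopStore is a hypothetical placeholder backend that discards everything.
type noopStore struct{}

func (noopStore) GetAllAsJSON() ([]byte, error)                                       { return []byte("{}"), nil }
func (noopStore) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus { return nil }
func (noopStore) GetServiceStatusByKey(key string) *core.ServiceStatus               { return nil }
func (noopStore) Insert(service *core.Service, result *core.Result)                  {}
func (noopStore) DeleteAllServiceStatusesNotInKeys(keys []string) int                { return 0 }
func (noopStore) Clear()                                                              {}

// Same compile-time check as the one used for memory.Store above.
var _ Store = noopStore{}
```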
storage/store/store_bench_test.go (new file, 136 lines)

@@ -0,0 +1,136 @@
+package store
+
+import (
+	"testing"
+	"time"
+
+	"github.com/TwinProduction/gatus/core"
+	"github.com/TwinProduction/gatus/storage/store/memory"
+)
+
+var (
+	firstCondition  = core.Condition("[STATUS] == 200")
+	secondCondition = core.Condition("[RESPONSE_TIME] < 500")
+	thirdCondition  = core.Condition("[CERTIFICATE_EXPIRATION] < 72h")
+
+	timestamp = time.Now()
+
+	testService = core.Service{
+		Name:                    "name",
+		Group:                   "group",
+		URL:                     "https://example.org/what/ever",
+		Method:                  "GET",
+		Body:                    "body",
+		Interval:                30 * time.Second,
+		Conditions:              []*core.Condition{&firstCondition, &secondCondition, &thirdCondition},
+		Alerts:                  nil,
+		Insecure:                false,
+		NumberOfFailuresInARow:  0,
+		NumberOfSuccessesInARow: 0,
+	}
+	testSuccessfulResult = core.Result{
+		Hostname:              "example.org",
+		IP:                    "127.0.0.1",
+		HTTPStatus:            200,
+		Body:                  []byte("body"),
+		Errors:                nil,
+		Connected:             true,
+		Success:               true,
+		Timestamp:             timestamp,
+		Duration:              150 * time.Millisecond,
+		CertificateExpiration: 10 * time.Hour,
+		ConditionResults: []*core.ConditionResult{
+			{
+				Condition: "[STATUS] == 200",
+				Success:   true,
+			},
+			{
+				Condition: "[RESPONSE_TIME] < 500",
+				Success:   true,
+			},
+			{
+				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
+				Success:   true,
+			},
+		},
+	}
+	testUnsuccessfulResult = core.Result{
+		Hostname:              "example.org",
+		IP:                    "127.0.0.1",
+		HTTPStatus:            200,
+		Body:                  []byte("body"),
+		Errors:                []string{"error-1", "error-2"},
+		Connected:             true,
+		Success:               false,
+		Timestamp:             timestamp,
+		Duration:              750 * time.Millisecond,
+		CertificateExpiration: 10 * time.Hour,
+		ConditionResults: []*core.ConditionResult{
+			{
+				Condition: "[STATUS] == 200",
+				Success:   true,
+			},
+			{
+				Condition: "[RESPONSE_TIME] < 500",
+				Success:   false,
+			},
+			{
+				Condition: "[CERTIFICATE_EXPIRATION] < 72h",
+				Success:   false,
+			},
+		},
+	}
+)
+
+func BenchmarkStore_GetAllAsJSON(b *testing.B) {
+	memoryStore, err := memory.NewStore("")
+	if err != nil {
+		b.Fatal("failed to create store:", err.Error())
+	}
+	type Scenario struct {
+		Name  string
+		Store Store
+	}
+	scenarios := []Scenario{
+		{
+			Name:  "memory",
+			Store: memoryStore,
+		},
+	}
+	for _, scenario := range scenarios {
+		scenario.Store.Insert(&testService, &testSuccessfulResult)
+		scenario.Store.Insert(&testService, &testUnsuccessfulResult)
+		b.Run(scenario.Name, func(b *testing.B) {
+			for n := 0; n < b.N; n++ {
+				scenario.Store.GetAllAsJSON()
+			}
+			b.ReportAllocs()
+		})
+	}
+}
+
+func BenchmarkStore_Insert(b *testing.B) {
+	memoryStore, err := memory.NewStore("")
+	if err != nil {
+		b.Fatal("failed to create store:", err.Error())
+	}
+	type Scenario struct {
+		Name  string
+		Store Store
+	}
+	scenarios := []Scenario{
+		{
+			Name:  "memory",
+			Store: memoryStore,
+		},
+	}
+	for _, scenario := range scenarios {
+		b.Run(scenario.Name, func(b *testing.B) {
+			for n := 0; n < b.N; n++ {
+				scenario.Store.Insert(&testService, &testSuccessfulResult)
+				scenario.Store.Insert(&testService, &testUnsuccessfulResult)
+			}
+			b.ReportAllocs()
+		})
+	}
+}
106
vendor/github.com/TwinProduction/gocache/README.md
generated
vendored
106
vendor/github.com/TwinProduction/gocache/README.md
generated
vendored
@ -306,9 +306,9 @@ If you do not start the janitor, there will be no passive deletion of expired ke
|
|||||||
For the sake of convenience, a ready-to-go cache server is available
|
For the sake of convenience, a ready-to-go cache server is available
|
||||||
through the `gocacheserver` package.
|
through the `gocacheserver` package.
|
||||||
|
|
||||||
The reason why the server is in a different package is because `gocache` does not use
|
The reason why the server is in a different package is because `gocache` limit its external dependencies to the strict
|
||||||
any external dependencies, but rather than re-inventing the wheel, the server
|
minimum (e.g. boltdb for persistence), however, rather than re-inventing the wheel, the server implementation uses
|
||||||
implementation uses redcon, which is a Redis server framework for Go.
|
redcon, which is a very good Redis server framework for Go.
|
||||||
|
|
||||||
That way, those who desire to use gocache without the server will not add any extra dependencies
|
That way, those who desire to use gocache without the server will not add any extra dependencies
|
||||||
as long as they don't import the `gocacheserver` package.
|
as long as they don't import the `gocacheserver` package.
|
||||||
@ -323,7 +323,7 @@ import (
|
|||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
|
cache := gocache.NewCache().WithEvictionPolicy(gocache.LeastRecentlyUsed).WithMaxSize(100000)
|
||||||
server := gocacheserver.NewServer(cache)
|
server := gocacheserver.NewServer(cache).WithPort(6379)
|
||||||
server.Start()
|
server.Start()
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
@ -382,43 +382,67 @@ but if you're looking into using a library like gocache, odds are, you want more
| mem | 32G DDR4 |

```
----- removed (previous benchmark results) -----
BenchmarkMap_Get-8 95936680 26.3 ns/op
BenchmarkMap_SetSmallValue-8 7738132 424 ns/op
BenchmarkMap_SetMediumValue-8 7766346 424 ns/op
BenchmarkMap_SetLargeValue-8 7947063 435 ns/op
BenchmarkCache_Get-8 54549049 45.7 ns/op
BenchmarkCache_SetSmallValue-8 35225013 69.2 ns/op
BenchmarkCache_SetMediumValue-8 5952064 412 ns/op
BenchmarkCache_SetLargeValue-8 5969121 411 ns/op
BenchmarkCache_GetUsingLRU-8 54545949 45.6 ns/op
BenchmarkCache_SetSmallValueUsingLRU-8 5909504 419 ns/op
BenchmarkCache_SetMediumValueUsingLRU-8 5910885 418 ns/op
BenchmarkCache_SetLargeValueUsingLRU-8 5867544 419 ns/op
BenchmarkCache_SetSmallValueWhenUsingMaxMemoryUsage-8 5477178 462 ns/op
BenchmarkCache_SetMediumValueWhenUsingMaxMemoryUsage-8 5417595 475 ns/op
BenchmarkCache_SetLargeValueWhenUsingMaxMemoryUsage-8 5215263 479 ns/op
BenchmarkCache_SetSmallValueWithMaxSize10-8 10115574 236 ns/op
BenchmarkCache_SetMediumValueWithMaxSize10-8 10242792 241 ns/op
BenchmarkCache_SetLargeValueWithMaxSize10-8 10201894 241 ns/op
BenchmarkCache_SetSmallValueWithMaxSize1000-8 9637113 253 ns/op
BenchmarkCache_SetMediumValueWithMaxSize1000-8 9635175 253 ns/op
BenchmarkCache_SetLargeValueWithMaxSize1000-8 9598982 260 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000-8 7642584 337 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000-8 7407571 344 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000-8 7071360 345 ns/op
BenchmarkCache_SetSmallValueWithMaxSize100000AndLRU-8 7544194 332 ns/op
BenchmarkCache_SetMediumValueWithMaxSize100000AndLRU-8 7667004 344 ns/op
BenchmarkCache_SetLargeValueWithMaxSize100000AndLRU-8 7357642 338 ns/op
BenchmarkCache_GetAndSetMultipleConcurrently-8 1442306 1684 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndLRU-8 5117271 477 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndFIFO-8 5228412 475 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndLRU-8 5139195 529 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithRandomKeysAndNoEvictionAndFIFO-8 5251639 511 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndLRU-8 7384626 334 ns/op
BenchmarkCache_GetAndSetConcurrentlyWithFrequentEvictionsAndFIFO-8 7361985 332 ns/op
BenchmarkCache_GetConcurrentlyWithLRU-8 3370784 726 ns/op
BenchmarkCache_GetConcurrentlyWithFIFO-8 3749994 681 ns/op
BenchmarkCache_GetKeysThatDoNotExistConcurrently-8 17647344 143 ns/op

----- added (updated benchmark results) -----
// Normal map
BenchmarkMap_Get
BenchmarkMap_Get-8 46087372 26.7 ns/op
BenchmarkMap_Set
BenchmarkMap_Set/small_value-8 3841911 389 ns/op
BenchmarkMap_Set/medium_value-8 3887074 391 ns/op
BenchmarkMap_Set/large_value-8 3921956 393 ns/op
// Gocache
BenchmarkCache_Get
BenchmarkCache_Get/FirstInFirstOut-8 27273036 46.4 ns/op
BenchmarkCache_Get/LeastRecentlyUsed-8 26648248 46.3 ns/op
BenchmarkCache_Set
BenchmarkCache_Set/FirstInFirstOut_small_value-8 2919584 405 ns/op
BenchmarkCache_Set/FirstInFirstOut_medium_value-8 2990841 391 ns/op
BenchmarkCache_Set/FirstInFirstOut_large_value-8 2970513 391 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_small_value-8 2962939 402 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_medium_value-8 2962963 390 ns/op
BenchmarkCache_Set/LeastRecentlyUsed_large_value-8 2962928 394 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage
BenchmarkCache_SetUsingMaxMemoryUsage/small_value-8 2683356 447 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage/medium_value-8 2637578 441 ns/op
BenchmarkCache_SetUsingMaxMemoryUsage/large_value-8 2672434 443 ns/op
BenchmarkCache_SetWithMaxSize
BenchmarkCache_SetWithMaxSize/100_small_value-8 4782966 252 ns/op
BenchmarkCache_SetWithMaxSize/10000_small_value-8 4067967 296 ns/op
BenchmarkCache_SetWithMaxSize/100000_small_value-8 3762055 328 ns/op
BenchmarkCache_SetWithMaxSize/100_medium_value-8 4760479 252 ns/op
BenchmarkCache_SetWithMaxSize/10000_medium_value-8 4081050 295 ns/op
BenchmarkCache_SetWithMaxSize/100000_medium_value-8 3785050 330 ns/op
BenchmarkCache_SetWithMaxSize/100_large_value-8 4732909 254 ns/op
BenchmarkCache_SetWithMaxSize/10000_large_value-8 4079533 297 ns/op
BenchmarkCache_SetWithMaxSize/100000_large_value-8 3712820 331 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU
BenchmarkCache_SetWithMaxSizeAndLRU/100_small_value-8 4761732 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_small_value-8 4084474 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_small_value-8 3761402 329 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100_medium_value-8 4783075 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_medium_value-8 4103980 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_medium_value-8 3646023 331 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100_large_value-8 4779025 254 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/10000_large_value-8 4096192 296 ns/op
BenchmarkCache_SetWithMaxSizeAndLRU/100000_large_value-8 3726823 331 ns/op
BenchmarkCache_GetSetMultipleConcurrent
BenchmarkCache_GetSetMultipleConcurrent-8 707142 1698 ns/op
BenchmarkCache_GetSetConcurrentWithFrequentEviction
BenchmarkCache_GetSetConcurrentWithFrequentEviction/FirstInFirstOut-8 3616256 334 ns/op
BenchmarkCache_GetSetConcurrentWithFrequentEviction/LeastRecentlyUsed-8 3636367 331 ns/op
BenchmarkCache_GetConcurrentWithLRU
BenchmarkCache_GetConcurrentWithLRU/FirstInFirstOut-8 4405557 268 ns/op
BenchmarkCache_GetConcurrentWithLRU/LeastRecentlyUsed-8 4445475 269 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true_with_nil_struct_pointer-8 6184591 191 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/true-8 6090482 191 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false_with_nil_struct_pointer-8 6184629 187 ns/op
BenchmarkCache_WithForceNilInterfaceOnNilPointer/false-8 6281781 186 ns/op
(Trimmed "BenchmarkCache_" for readability)
WithForceNilInterfaceOnNilPointerWithConcurrency
WithForceNilInterfaceOnNilPointerWithConcurrency/true_with_nil_struct_pointer-8 4379564 268 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/true-8 4379558 265 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/false_with_nil_struct_pointer-8 4444456 261 ns/op
WithForceNilInterfaceOnNilPointerWithConcurrency/false-8 4493896 262 ns/op
```

12
vendor/github.com/TwinProduction/gocache/gocache.go
generated
vendored
@ -170,7 +170,7 @@ func (cache *Cache) WithEvictionPolicy(policy EvictionPolicy) *Cache {
// value, _ := cache.Get("key")
// // the following returns true, because the interface{} was forcefully set to nil
// if value == nil {}
// // the following will panic, because the value has been casted to its type
// // the following will panic, because the value has been casted to its type (which is nil)
// if value.(*Struct) == nil {}
//
// If set to false:
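The comment above documents a classic Go pitfall: a typed nil pointer stored in an interface{} is not equal to nil. A minimal sketch of both behaviours, assuming the NewCache constructor and the WithForceNilInterfaceOnNilPointer option referenced in these comments and in the benchmark names above:

```go
package main

import (
	"fmt"

	"github.com/TwinProduction/gocache"
)

type Struct struct{}

func main() {
	// Assumed API: NewCache and WithForceNilInterfaceOnNilPointer are inferred from the
	// vendored comments and benchmark names above, not verified against a specific version.
	cache := gocache.NewCache().WithForceNilInterfaceOnNilPointer(true)

	var s *Struct // typed nil pointer
	cache.Set("key", s)

	value, _ := cache.Get("key")
	if value == nil {
		// With the option enabled, the typed nil pointer was stored as a plain nil
		// interface, so this branch is taken and the assertion below is never reached.
		fmt.Println("value is nil")
		return
	}
	// With the option disabled, value is a non-nil interface wrapping a nil *Struct:
	// value == nil is false, and value.(*Struct) == nil is the check that reports true.
	fmt.Println(value.(*Struct) == nil)
}
```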
@ -218,7 +218,8 @@ func (cache *Cache) Set(key string, value interface{}) {
// The TTL provided must be greater than 0, or NoExpiration (-1). If a negative value that isn't -1 (NoExpiration) is
// provided, the entry will not be created if the key doesn't exist
func (cache *Cache) SetWithTTL(key string, value interface{}, ttl time.Duration) {
	// An interface is only nil if both its value and its type are nil, however, passing a pointer
	// An interface is only nil if both its value and its type are nil, however, passing a nil pointer as an interface{}
	// means that the interface itself is not nil, because the interface value is nil but not the type.
	if cache.forceNilInterfaceOnNilPointer {
		if value != nil && (reflect.ValueOf(value).Kind() == reflect.Ptr && reflect.ValueOf(value).IsNil()) {
			value = nil
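To make the TTL rules in the comment above concrete, here is a short usage sketch (imports of time and gocache omitted; gocache.NewCache is assumed, and NoExpiration is the -1 sentinel named in the comment):

```go
cache := gocache.NewCache()

// Positive TTL: the entry expires after the given duration.
cache.SetWithTTL("session", "token", 5*time.Minute)

// NoExpiration (-1): the entry never expires.
cache.SetWithTTL("config", "value", gocache.NoExpiration)

// Any other negative TTL: the entry is not created if the key doesn't exist.
cache.SetWithTTL("temporary", "value", -5*time.Second)
```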
@ -334,6 +335,13 @@ func (cache *Cache) Get(key string) (interface{}, bool) {
	return entry.Value, true
}

// GetValue retrieves an entry using the key passed as parameter
// Unlike Get, this function only returns the value
func (cache *Cache) GetValue(key string) interface{} {
	value, _ := cache.Get(key)
	return value
}

// GetByKeys retrieves multiple entries using the keys passed as parameter
// All keys are returned in the map, regardless of whether they exist or not, however, entries that do not exist in the
// cache will return nil, meaning that there is no way of determining whether a key genuinely has the value nil, or
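A short sketch of what the new GetValue helper changes for callers, and of the nil ambiguity the GetByKeys comment warns about; the GetByKeys signature is assumed from its doc comment, and cache is a *gocache.Cache as in the earlier examples:

```go
// Get reports whether the key existed, so a stored nil can be told apart from a miss.
value, exists := cache.Get("key")
fmt.Println(value, exists)

// GetValue drops the second return value: a nil result can mean either
// "key not found" or "key exists but holds nil".
fmt.Println(cache.GetValue("key"))

// GetByKeys has the same ambiguity: missing keys still appear in the returned map, with nil values.
values := cache.GetByKeys([]string{"key", "does-not-exist"})
fmt.Println(values["does-not-exist"]) // nil, even though the key is present in the map
```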
26
vendor/github.com/TwinProduction/gocache/policy.go
generated
vendored
@ -3,6 +3,30 @@ package gocache
type EvictionPolicy string

var (
	// LeastRecentlyUsed is an eviction policy that causes the most recently accessed cache entry to be moved to the
	// head of the cache. Effectively, this causes the cache entries that have not been accessed for some time to
	// gradually move closer and closer to the tail, and since the tail is the entry that gets deleted when an eviction
	// is required, it allows less used cache entries to be evicted while keeping recently accessed entries at or close
	// to the head.
	//
	// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
	// put 3 at the head and 1 at the tail:
	// 3 (head) -> 2 -> 1 (tail)
	// If the cache entry 1 was then accessed, 1 would become the head and 2 the tail:
	// 1 (head) -> 3 -> 2 (tail)
	// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (2) would then be evicted:
	// 4 (head) -> 1 -> 3 (tail)
	LeastRecentlyUsed EvictionPolicy = "LeastRecentlyUsed"

	FirstInFirstOut EvictionPolicy = "FirstInFirstOut"

	// FirstInFirstOut is an eviction policy that causes cache entries to be evicted in the same order that they are
	// created.
	//
	// For instance, creating a Cache with a Cache.MaxSize of 3 and creating the entries 1, 2 and 3 in that order would
	// put 3 at the head and 1 at the tail:
	// 3 (head) -> 2 -> 1 (tail)
	// If the cache entry 1 was then accessed, unlike with LeastRecentlyUsed, nothing would change:
	// 3 (head) -> 2 -> 1 (tail)
	// If a cache entry 4 was then created, because the Cache.MaxSize is 3, the tail (1) would then be evicted:
	// 4 (head) -> 3 -> 2 (tail)
	FirstInFirstOut EvictionPolicy = "FirstInFirstOut"
)
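The head/tail walkthroughs above map directly onto code. A minimal sketch, assuming the WithMaxSize builder suggested by the Cache.MaxSize references and benchmark names (WithEvictionPolicy itself is shown in the gocache.go hunk earlier):

```go
// LeastRecentlyUsed: Get moves an entry to the head, so cold entries drift to the tail and are evicted first.
lru := gocache.NewCache().WithMaxSize(3).WithEvictionPolicy(gocache.LeastRecentlyUsed)
lru.Set("1", 1)
lru.Set("2", 2)
lru.Set("3", 3) // 3 (head) -> 2 -> 1 (tail)
lru.Get("1")    // 1 (head) -> 3 -> 2 (tail)
lru.Set("4", 4) // evicts 2: 4 (head) -> 1 -> 3 (tail)

// FirstInFirstOut: access order is ignored; the oldest entry is always the next to be evicted.
fifo := gocache.NewCache().WithMaxSize(3).WithEvictionPolicy(gocache.FirstInFirstOut)
fifo.Set("1", 1)
fifo.Set("2", 2)
fifo.Set("3", 3) // 3 (head) -> 2 -> 1 (tail)
fifo.Get("1")    // no change: 3 (head) -> 2 -> 1 (tail)
fifo.Set("4", 4) // evicts 1: 4 (head) -> 3 -> 2 (tail)
```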
2
vendor/modules.txt
vendored
@ -1,7 +1,7 @@
# cloud.google.com/go v0.74.0
## explicit
cloud.google.com/go/compute/metadata
# github.com/TwinProduction/gocache v1.1.0
# github.com/TwinProduction/gocache v1.2.0
## explicit
github.com/TwinProduction/gocache
# github.com/beorn7/perks v1.0.1
@ -13,8 +13,6 @@ import (
)

var (
	store = storage.NewInMemoryStore()

	// monitoringMutex is used to prevent multiple services from being evaluated at the same time.
	// Without this, conditions using response time may become inaccurate.
	monitoringMutex sync.Mutex
@ -22,12 +20,12 @@ var (

// GetServiceStatusesAsJSON the JSON encoding of all core.ServiceStatus recorded
func GetServiceStatusesAsJSON() ([]byte, error) {
	return store.GetAllAsJSON()
	return storage.Get().GetAllAsJSON()
}

// GetUptimeByKey returns the uptime of a service based on the ServiceStatus key
func GetUptimeByKey(key string) *core.Uptime {
	serviceStatus := store.GetServiceStatusByKey(key)
	serviceStatus := storage.Get().GetServiceStatusByKey(key)
	if serviceStatus == nil {
		return nil
	}
@ -36,7 +34,7 @@ func GetUptimeByKey(key string) *core.Uptime {

// GetServiceStatusByKey returns the uptime of a service based on its ServiceStatus key
func GetServiceStatusByKey(key string) *core.ServiceStatus {
	return store.GetServiceStatusByKey(key)
	return storage.Get().GetServiceStatusByKey(key)
}

// Monitor loops over each services and starts a goroutine to monitor each services separately
@ -88,5 +86,5 @@ func monitor(service *core.Service) {

// UpdateServiceStatuses updates the slice of service statuses
func UpdateServiceStatuses(service *core.Service, result *core.Result) {
	store.Insert(service, result)
	storage.Get().Insert(service, result)
}
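The watchdog changes above swap the package-level store variable for calls to storage.Get(). A hypothetical sketch of the accessor those calls imply; only NewInMemoryStore and the three methods used above come from this diff, while the Store interface name and the sync.Once initialization are assumptions:

```go
package storage

import (
	"sync"

	"github.com/TwinProduction/gatus/core"
)

// Store lists only the methods the watchdog relies on above (the interface name is hypothetical).
type Store interface {
	GetAllAsJSON() ([]byte, error)
	GetServiceStatusByKey(key string) *core.ServiceStatus
	Insert(service *core.Service, result *core.Result)
}

var (
	store Store
	once  sync.Once
)

// Get lazily initializes and returns the shared store, defaulting to the in-memory
// implementation when nothing else has been configured.
func Get() Store {
	once.Do(func() {
		if store == nil {
			store = NewInMemoryStore()
		}
	})
	return store
}
```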
@ -8,26 +8,26 @@
          <hr class="mb-4" />
          <Service :data="serviceStatus" :maximumNumberOfResults="20" @showTooltip="showTooltip" />
        </slot>
        <div v-if="serviceStatus.uptime" class="mt-12">
        <div v-if="uptime" class="mt-12">
          <h1 class="text-xl xl:text-3xl text-monospace text-gray-400">UPTIME</h1>
          <hr />
          <div class="flex space-x-4 text-center text-xl xl:text-2xl mt-3">
            <div class="flex-1">
              {{ prettifyUptime(serviceStatus.uptime['7d']) }}
              {{ prettifyUptime(uptime['7d']) }}
              <h2 class="text-sm text-gray-400">Last 7 days</h2>
            </div>
            <div class="flex-1">
              {{ prettifyUptime(serviceStatus.uptime['24h']) }}
              {{ prettifyUptime(uptime['24h']) }}
              <h2 class="text-sm text-gray-400">Last 24 hours</h2>
            </div>
            <div class="flex-1">
              {{ prettifyUptime(serviceStatus.uptime['1h']) }}
              {{ prettifyUptime(uptime['1h']) }}
              <h2 class="text-sm text-gray-400">Last hour</h2>
            </div>
          </div>
          <hr class="mt-1"/>
          <h3 class="text-xl text-monospace text-gray-400 mt-1 text-right">BADGES</h3>
          <div class="flex space-x-4 text-center text-2xl mt-6 relative bottom-12">
          <div v-if="serviceStatus && serviceStatus.key" class="flex space-x-4 text-center text-2xl mt-6 relative bottom-12">
            <div class="flex-1">
              <img :src="generateBadgeImageURL('7d')" alt="7d uptime badge" class="mx-auto" />
            </div>
@ -90,6 +90,7 @@ export default {
        .then(data => {
          if (JSON.stringify(this.serviceStatus) !== JSON.stringify(data)) {
            this.serviceStatus = data.serviceStatus;
            this.uptime = data.uptime;
            let events = [];
            for (let i = data.events.length-1; i >= 0; i--) {
              let event = data.events[i];
@ -143,6 +144,7 @@ export default {
    return {
      serviceStatus: {},
      events: [],
      uptime: {"7d": 0, "24h": 0, "1h": 0},
      // Since this page isn't at the root, we need to modify the server URL a bit
      serverUrl: SERVER_URL === '.' ? '..' : SERVER_URL,
    }
File diff suppressed because one or more lines are too long