1
0
mirror of https://github.com/TwiN/gatus.git synced 2026-02-14 14:42:24 +00:00

feat(suite): Implement Suites (#1239)

* feat(suite): Implement Suites

Fixes #1230

* Update docs

* Fix variable alignment

* Prevent always-run endpoint from running if a context placeholder fails to resolve in the URL

* Return errors when a context placeholder path fails to resolve

* Add a couple of unit tests

* Add a couple of unit tests

* fix(ui): Update group count properly

Fixes #1233

* refactor: Pass down entire config instead of several sub-configs

* fix: Change default suite interval and timeout

* fix: Deprecate disable-monitoring-lock in favor of concurrency

* fix: Make sure there are no duplicate keys

* Refactor some code

* Update watchdog/watchdog.go

* Update web/app/src/components/StepDetailsModal.vue

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* chore: Remove useless log

* fix: Set default concurrency to 3 instead of 5

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
This commit is contained in:
TwiN
2025-09-05 15:39:12 -04:00
committed by GitHub
parent 10cabb9dde
commit d668a14703
74 changed files with 7513 additions and 652 deletions

View File

@@ -4,5 +4,6 @@ import "errors"
// Sentinel errors shared by all store implementations; callers compare with errors.Is.
var (
	ErrEndpointNotFound = errors.New("endpoint not found")                // When an endpoint does not exist in the store
	ErrSuiteNotFound    = errors.New("suite not found")                   // When a suite does not exist in the store
	ErrInvalidTimeRange = errors.New("'from' cannot be older than 'to'") // When an invalid time range is provided
)

View File

@@ -0,0 +1,22 @@
package paging
// SuiteStatusParams represents the parameters for suite status queries
type SuiteStatusParams struct {
	Page     int // Page number
	PageSize int // Number of results per page
}

// NewSuiteStatusParams creates a new SuiteStatusParams initialized with the
// default page (1) and page size (20).
func NewSuiteStatusParams() *SuiteStatusParams {
	return &SuiteStatusParams{Page: 1, PageSize: 20}
}

// WithPagination sets the page and page size, returning the receiver so that
// calls can be chained.
func (params *SuiteStatusParams) WithPagination(page, pageSize int) *SuiteStatusParams {
	params.Page, params.PageSize = page, pageSize
	return params
}

View File

@@ -0,0 +1,124 @@
package paging
import (
"testing"
)
// TestNewSuiteStatusParams verifies the constructor returns a non-nil value
// carrying the default page (1) and page size (20).
func TestNewSuiteStatusParams(t *testing.T) {
	params := NewSuiteStatusParams()
	if params == nil {
		t.Fatal("NewSuiteStatusParams should not return nil")
	}
	if got := params.Page; got != 1 {
		t.Errorf("expected default Page to be 1, got %d", got)
	}
	if got := params.PageSize; got != 20 {
		t.Errorf("expected default PageSize to be 20, got %d", got)
	}
}
// TestSuiteStatusParams_WithPagination exercises WithPagination across valid,
// zero, negative, and large inputs; the values are always stored verbatim.
func TestSuiteStatusParams_WithPagination(t *testing.T) {
	scenarios := []struct {
		name           string
		page, pageSize int
	}{
		{name: "valid pagination", page: 2, pageSize: 50},
		{name: "zero page", page: 0, pageSize: 10},
		{name: "negative page", page: -1, pageSize: 20},
		{name: "zero page size", page: 1, pageSize: 0},
		{name: "negative page size", page: 1, pageSize: -10},
		{name: "large values", page: 1000, pageSize: 10000},
	}
	for _, scenario := range scenarios {
		t.Run(scenario.name, func(t *testing.T) {
			params := NewSuiteStatusParams().WithPagination(scenario.page, scenario.pageSize)
			if params.Page != scenario.page {
				t.Errorf("expected Page to be %d, got %d", scenario.page, params.Page)
			}
			if params.PageSize != scenario.pageSize {
				t.Errorf("expected PageSize to be %d, got %d", scenario.pageSize, params.PageSize)
			}
		})
	}
}
// TestSuiteStatusParams_ChainedMethods confirms the builder methods can be
// chained directly off the constructor.
func TestSuiteStatusParams_ChainedMethods(t *testing.T) {
	params := NewSuiteStatusParams().WithPagination(3, 100)
	if got := params.Page; got != 3 {
		t.Errorf("expected Page to be 3, got %d", got)
	}
	if got := params.PageSize; got != 100 {
		t.Errorf("expected PageSize to be 100, got %d", got)
	}
}
// TestSuiteStatusParams_OverwritePagination verifies that a second call to
// WithPagination replaces the values set by the first.
func TestSuiteStatusParams_OverwritePagination(t *testing.T) {
	params := NewSuiteStatusParams().WithPagination(2, 50)
	if params.Page != 2 || params.PageSize != 50 {
		t.Error("initial pagination not set correctly")
	}
	// A later call wins.
	params.WithPagination(5, 200)
	if got := params.Page; got != 5 {
		t.Errorf("expected Page to be overwritten to 5, got %d", got)
	}
	if got := params.PageSize; got != 200 {
		t.Errorf("expected PageSize to be overwritten to 200, got %d", got)
	}
}
// TestSuiteStatusParams_ReturnsSelf verifies WithPagination returns the very
// same pointer it was called on, which is what makes chaining possible.
func TestSuiteStatusParams_ReturnsSelf(t *testing.T) {
	params := NewSuiteStatusParams()
	if params.WithPagination(1, 20) != params {
		t.Error("WithPagination should return the same instance for method chaining")
	}
}

View File

@@ -7,16 +7,20 @@ import (
"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/key"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gocache/v2"
"github.com/TwiN/logr"
)
// Store that leverages gocache
type Store struct {
sync.RWMutex
cache *gocache.Cache
endpointCache *gocache.Cache // Cache for endpoint statuses
suiteCache *gocache.Cache // Cache for suite statuses
maximumNumberOfResults int // maximum number of results that an endpoint can have
maximumNumberOfEvents int // maximum number of events that an endpoint can have
@@ -28,7 +32,8 @@ type Store struct {
// supports eventual persistence.
func NewStore(maximumNumberOfResults, maximumNumberOfEvents int) (*Store, error) {
store := &Store{
cache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
endpointCache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
suiteCache: gocache.NewCache().WithMaxSize(gocache.NoMaxSize),
maximumNumberOfResults: maximumNumberOfResults,
maximumNumberOfEvents: maximumNumberOfEvents,
}
@@ -38,10 +43,12 @@ func NewStore(maximumNumberOfResults, maximumNumberOfEvents int) (*Store, error)
// GetAllEndpointStatuses returns all monitored endpoint.Status
// with a subset of endpoint.Result defined by the page and pageSize parameters
func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error) {
endpointStatuses := s.cache.GetAll()
pagedEndpointStatuses := make([]*endpoint.Status, 0, len(endpointStatuses))
for _, v := range endpointStatuses {
pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(v.(*endpoint.Status), params))
allStatuses := s.endpointCache.GetAll()
pagedEndpointStatuses := make([]*endpoint.Status, 0, len(allStatuses))
for _, v := range allStatuses {
if status, ok := v.(*endpoint.Status); ok {
pagedEndpointStatuses = append(pagedEndpointStatuses, ShallowCopyEndpointStatus(status, params))
}
}
sort.Slice(pagedEndpointStatuses, func(i, j int) bool {
return pagedEndpointStatuses[i].Key < pagedEndpointStatuses[j].Key
@@ -49,26 +56,53 @@ func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*
return pagedEndpointStatuses, nil
}
// GetAllSuiteStatuses returns all monitored suite.Status,
// each shallow-copied with the subset of results selected by params,
// sorted by key for deterministic ordering.
func (s *Store) GetAllSuiteStatuses(params *paging.SuiteStatusParams) ([]*suite.Status, error) {
	s.RLock()
	defer s.RUnlock()
	// Pre-size the slice from the cache snapshot to avoid repeated growth,
	// matching what GetAllEndpointStatuses does.
	all := s.suiteCache.GetAll()
	suiteStatuses := make([]*suite.Status, 0, len(all))
	for _, v := range all {
		// Comma-ok assertion: skip any cache entry that is not a *suite.Status
		// instead of panicking.
		if status, ok := v.(*suite.Status); ok {
			suiteStatuses = append(suiteStatuses, ShallowCopySuiteStatus(status, params))
		}
	}
	sort.Slice(suiteStatuses, func(i, j int) bool {
		return suiteStatuses[i].Key < suiteStatuses[j].Key
	})
	return suiteStatuses, nil
}
// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
return s.GetEndpointStatusByKey(key.ConvertGroupAndNameToKey(groupName, endpointName), params)
}
// GetEndpointStatusByKey returns the endpoint status for a given key
func (s *Store) GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
endpointStatus := s.cache.GetValue(key)
endpointStatus := s.endpointCache.GetValue(key)
if endpointStatus == nil {
return nil, common.ErrEndpointNotFound
}
return ShallowCopyEndpointStatus(endpointStatus.(*endpoint.Status), params), nil
}
// GetSuiteStatusByKey returns the suite status for a given key.
// Returns common.ErrSuiteNotFound if no suite status exists under that key.
func (s *Store) GetSuiteStatusByKey(key string, params *paging.SuiteStatusParams) (*suite.Status, error) {
	s.RLock()
	defer s.RUnlock()
	suiteStatus := s.suiteCache.GetValue(key)
	if suiteStatus == nil {
		return nil, common.ErrSuiteNotFound
	}
	// Comma-ok assertion so a mismatched cache entry cannot panic; treat it as
	// not found, consistent with the safe casts used elsewhere in this file.
	status, ok := suiteStatus.(*suite.Status)
	if !ok {
		return nil, common.ErrSuiteNotFound
	}
	return ShallowCopySuiteStatus(status, params), nil
}
// GetUptimeByKey returns the uptime percentage during a time range
func (s *Store) GetUptimeByKey(key string, from, to time.Time) (float64, error) {
if from.After(to) {
return 0, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
endpointStatus := s.endpointCache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return 0, common.ErrEndpointNotFound
}
@@ -97,7 +131,7 @@ func (s *Store) GetAverageResponseTimeByKey(key string, from, to time.Time) (int
if from.After(to) {
return 0, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
endpointStatus := s.endpointCache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return 0, common.ErrEndpointNotFound
}
@@ -125,7 +159,7 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
if from.After(to) {
return nil, common.ErrInvalidTimeRange
}
endpointStatus := s.cache.GetValue(key)
endpointStatus := s.endpointCache.GetValue(key)
if endpointStatus == nil || endpointStatus.(*endpoint.Status).Uptime == nil {
return nil, common.ErrEndpointNotFound
}
@@ -144,11 +178,11 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
return hourlyAverageResponseTimes, nil
}
// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
key := ep.Key()
// InsertEndpointResult adds the observed result for the specified endpoint into the store
func (s *Store) InsertEndpointResult(ep *endpoint.Endpoint, result *endpoint.Result) error {
endpointKey := ep.Key()
s.Lock()
status, exists := s.cache.Get(key)
status, exists := s.endpointCache.Get(endpointKey)
if !exists {
status = endpoint.NewStatus(ep.Group, ep.Name)
status.(*endpoint.Status).Events = append(status.(*endpoint.Status).Events, &endpoint.Event{
@@ -157,18 +191,45 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
})
}
AddResult(status.(*endpoint.Status), result, s.maximumNumberOfResults, s.maximumNumberOfEvents)
s.cache.Set(key, status)
s.endpointCache.Set(endpointKey, status)
s.Unlock()
return nil
}
// InsertSuiteResult adds the observed result for the specified suite into the store.
// A new suite.Status is created on first insert; older results beyond the
// configured maximum are discarded so memory usage stays bounded.
func (s *Store) InsertSuiteResult(su *suite.Suite, result *suite.Result) error {
	s.Lock()
	defer s.Unlock()
	suiteKey := su.Key()
	cached := s.suiteCache.GetValue(suiteKey)
	// Comma-ok assertion covers both a missing entry (nil) and a mismatched
	// cache entry, so a bad value cannot panic — we simply start fresh.
	status, ok := cached.(*suite.Status)
	if !ok {
		status = &suite.Status{
			Name:  su.Name,
			Group: su.Group,
			// Reuse the key computed above rather than calling su.Key() twice.
			Key:     suiteKey,
			Results: []*suite.Result{},
		}
		logr.Debugf("[memory.InsertSuiteResult] Created new suite status for suiteKey=%s", suiteKey)
	}
	// Add the new result at the end (append like endpoint implementation)
	status.Results = append(status.Results, result)
	// Keep only the maximum number of results
	if len(status.Results) > s.maximumNumberOfResults {
		status.Results = status.Results[len(status.Results)-s.maximumNumberOfResults:]
	}
	s.suiteCache.Set(suiteKey, status)
	logr.Debugf("[memory.InsertSuiteResult] Stored suite result for suiteKey=%s, total results=%d", suiteKey, len(status.Results))
	return nil
}
// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
var keysToDelete []string
for _, existingKey := range s.cache.GetKeysByPattern("*", 0) {
for _, existingKey := range s.endpointCache.GetKeysByPattern("*", 0) {
shouldDelete := true
for _, key := range keys {
if existingKey == key {
for _, k := range keys {
if existingKey == k {
shouldDelete = false
break
}
@@ -177,7 +238,24 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
keysToDelete = append(keysToDelete, existingKey)
}
}
return s.cache.DeleteAll(keysToDelete)
return s.endpointCache.DeleteAll(keysToDelete)
}
// DeleteAllSuiteStatusesNotInKeys removes all suite statuses that are not within the keys provided
func (s *Store) DeleteAllSuiteStatusesNotInKeys(keys []string) int {
	s.Lock()
	defer s.Unlock()
	// Build a membership set so each existing key is checked in O(1).
	keep := make(map[string]bool, len(keys))
	for _, k := range keys {
		keep[k] = true
	}
	// Collect every cached key that is no longer wanted, then delete in bulk.
	var stale []string
	for existingKey := range s.suiteCache.GetAll() {
		if !keep[existingKey] {
			stale = append(stale, existingKey)
		}
	}
	return s.suiteCache.DeleteAll(stale)
}
// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint as well as the necessary information to resolve it
@@ -215,12 +293,16 @@ func (s *Store) DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(ep *endpoint.En
func (s *Store) HasEndpointStatusNewerThan(key string, timestamp time.Time) (bool, error) {
s.RLock()
defer s.RUnlock()
endpointStatus := s.cache.GetValue(key)
endpointStatus := s.endpointCache.GetValue(key)
if endpointStatus == nil {
// If no endpoint exists, there's no newer status, so return false instead of an error
return false, nil
}
for _, result := range endpointStatus.(*endpoint.Status).Results {
status, ok := endpointStatus.(*endpoint.Status)
if !ok {
return false, nil
}
for _, result := range status.Results {
if result.Timestamp.After(timestamp) {
return true, nil
}
@@ -230,7 +312,8 @@ func (s *Store) HasEndpointStatusNewerThan(key string, timestamp time.Time) (boo
// Clear deletes everything from the store
func (s *Store) Clear() {
s.cache.Clear()
s.endpointCache.Clear()
s.suiteCache.Clear()
}
// Save persists the cache to the store file

View File

@@ -1,10 +1,12 @@
package memory
import (
"sync"
"testing"
"time"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)
@@ -86,12 +88,12 @@ func TestStore_SanityCheck(t *testing.T) {
store, _ := NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Clear()
defer store.Close()
store.Insert(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
}
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
// Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
@@ -140,8 +142,8 @@ func TestStore_HasEndpointStatusNewerThan(t *testing.T) {
store, _ := NewStore(storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Clear()
defer store.Close()
// Insert a result
err := store.Insert(&testEndpoint, &testSuccessfulResult)
// InsertEndpointResult a result
err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
if err != nil {
t.Fatalf("expected no error while inserting result, got %v", err)
}
@@ -162,3 +164,931 @@ func TestStore_HasEndpointStatusNewerThan(t *testing.T) {
t.Fatal("expected not to have a newer status, but did")
}
}
// TestStore_MixedEndpointsAndSuites tests that having both endpoints and suites in the cache
// doesn't cause issues with core operations
func TestStore_MixedEndpointsAndSuites(t *testing.T) {
	// Helper function to create and populate a store with test data.
	// Returns: store, two standalone endpoints, two suite-member endpoints, and the suite itself.
	setupStore := func(t *testing.T) (*Store, *endpoint.Endpoint, *endpoint.Endpoint, *endpoint.Endpoint, *endpoint.Endpoint, *suite.Suite) {
		store, err := NewStore(100, 50)
		if err != nil {
			t.Fatal("expected no error, got", err)
		}
		// Create regular endpoints
		endpoint1 := &endpoint.Endpoint{
			Name:  "endpoint1",
			Group: "group1",
			URL:   "https://example.com/1",
		}
		endpoint2 := &endpoint.Endpoint{
			Name:  "endpoint2",
			Group: "group2",
			URL:   "https://example.com/2",
		}
		// Create suite endpoints (these would be part of a suite)
		suiteEndpoint1 := &endpoint.Endpoint{
			Name:  "suite-endpoint1",
			Group: "suite-group",
			URL:   "https://example.com/suite1",
		}
		suiteEndpoint2 := &endpoint.Endpoint{
			Name:  "suite-endpoint2",
			Group: "suite-group",
			URL:   "https://example.com/suite2",
		}
		// Create a suite
		testSuite := &suite.Suite{
			Name:  "test-suite",
			Group: "suite-group",
			Endpoints: []*endpoint.Endpoint{
				suiteEndpoint1,
				suiteEndpoint2,
			},
		}
		return store, endpoint1, endpoint2, suiteEndpoint1, suiteEndpoint2, testSuite
	}
	// Test 1: Insert endpoint results
	t.Run("InsertEndpointResults", func(t *testing.T) {
		store, endpoint1, endpoint2, suiteEndpoint1, suiteEndpoint2, _ := setupStore(t)
		// Insert regular endpoint results
		result1 := &endpoint.Result{
			Success:   true,
			Timestamp: time.Now(),
			Duration:  100 * time.Millisecond,
		}
		if err := store.InsertEndpointResult(endpoint1, result1); err != nil {
			t.Fatalf("failed to insert endpoint1 result: %v", err)
		}
		result2 := &endpoint.Result{
			Success:   false,
			Timestamp: time.Now(),
			Duration:  200 * time.Millisecond,
			Errors:    []string{"error"},
		}
		if err := store.InsertEndpointResult(endpoint2, result2); err != nil {
			t.Fatalf("failed to insert endpoint2 result: %v", err)
		}
		// Insert suite endpoint results
		suiteResult1 := &endpoint.Result{
			Success:   true,
			Timestamp: time.Now(),
			Duration:  50 * time.Millisecond,
		}
		if err := store.InsertEndpointResult(suiteEndpoint1, suiteResult1); err != nil {
			t.Fatalf("failed to insert suite endpoint1 result: %v", err)
		}
		suiteResult2 := &endpoint.Result{
			Success:   true,
			Timestamp: time.Now(),
			Duration:  75 * time.Millisecond,
		}
		if err := store.InsertEndpointResult(suiteEndpoint2, suiteResult2); err != nil {
			t.Fatalf("failed to insert suite endpoint2 result: %v", err)
		}
	})
	// Test 2: Insert suite result
	t.Run("InsertSuiteResult", func(t *testing.T) {
		store, _, _, _, _, testSuite := setupStore(t)
		timestamp := time.Now()
		suiteResult := &suite.Result{
			Name:      testSuite.Name,
			Group:     testSuite.Group,
			Success:   true,
			Timestamp: timestamp,
			Duration:  125 * time.Millisecond,
			EndpointResults: []*endpoint.Result{
				{Success: true, Duration: 50 * time.Millisecond},
				{Success: true, Duration: 75 * time.Millisecond},
			},
		}
		if err := store.InsertSuiteResult(testSuite, suiteResult); err != nil {
			t.Fatalf("failed to insert suite result: %v", err)
		}
		// Verify the suite result was stored correctly
		status, err := store.GetSuiteStatusByKey(testSuite.Key(), nil)
		if err != nil {
			t.Fatalf("failed to get suite status: %v", err)
		}
		if len(status.Results) != 1 {
			t.Errorf("expected 1 suite result, got %d", len(status.Results))
		}
		stored := status.Results[0]
		if stored.Name != testSuite.Name {
			t.Errorf("expected result name %s, got %s", testSuite.Name, stored.Name)
		}
		if stored.Group != testSuite.Group {
			t.Errorf("expected result group %s, got %s", testSuite.Group, stored.Group)
		}
		if !stored.Success {
			t.Error("expected result to be successful")
		}
		if stored.Duration != 125*time.Millisecond {
			t.Errorf("expected duration 125ms, got %v", stored.Duration)
		}
		if len(stored.EndpointResults) != 2 {
			t.Errorf("expected 2 endpoint results, got %d", len(stored.EndpointResults))
		}
	})
	// Test 3: GetAllEndpointStatuses should only return endpoints, not suites
	t.Run("GetAllEndpointStatuses", func(t *testing.T) {
		store, endpoint1, endpoint2, suiteEndpoint1, suiteEndpoint2, testSuite := setupStore(t)
		// Insert all test data
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 100 * time.Millisecond})
		store.InsertEndpointResult(endpoint2, &endpoint.Result{Success: false, Timestamp: time.Now(), Duration: 200 * time.Millisecond})
		store.InsertEndpointResult(suiteEndpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 50 * time.Millisecond})
		store.InsertEndpointResult(suiteEndpoint2, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 75 * time.Millisecond})
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: true,
			Timestamp: time.Now(), Duration: 125 * time.Millisecond,
		})
		statuses, err := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if err != nil {
			t.Fatalf("failed to get all endpoint statuses: %v", err)
		}
		// Should have 4 endpoints (2 regular + 2 suite endpoints)
		if len(statuses) != 4 {
			t.Errorf("expected 4 endpoint statuses, got %d", len(statuses))
		}
		// Verify all are endpoint statuses with correct data, not suite statuses
		expectedEndpoints := map[string]struct {
			success  bool
			duration time.Duration
		}{
			"endpoint1":       {success: true, duration: 100 * time.Millisecond},
			"endpoint2":       {success: false, duration: 200 * time.Millisecond},
			"suite-endpoint1": {success: true, duration: 50 * time.Millisecond},
			"suite-endpoint2": {success: true, duration: 75 * time.Millisecond},
		}
		for _, status := range statuses {
			if status.Name == "" {
				t.Error("endpoint status should have a name")
			}
			// Make sure none of them are the suite itself
			if status.Name == "test-suite" {
				t.Error("suite should not appear in endpoint statuses")
			}
			// Verify detailed endpoint data
			expected, exists := expectedEndpoints[status.Name]
			if !exists {
				t.Errorf("unexpected endpoint name: %s", status.Name)
				continue
			}
			// Check that endpoint has results and verify the data
			if len(status.Results) != 1 {
				t.Errorf("endpoint %s should have 1 result, got %d", status.Name, len(status.Results))
				continue
			}
			result := status.Results[0]
			if result.Success != expected.success {
				t.Errorf("endpoint %s result success should be %v, got %v", status.Name, expected.success, result.Success)
			}
			if result.Duration != expected.duration {
				t.Errorf("endpoint %s result duration should be %v, got %v", status.Name, expected.duration, result.Duration)
			}
			// Remove matched entries so leftovers can be reported below
			delete(expectedEndpoints, status.Name)
		}
		if len(expectedEndpoints) > 0 {
			t.Errorf("missing expected endpoints: %v", expectedEndpoints)
		}
	})
	// Test 4: GetAllSuiteStatuses should only return suites, not endpoints
	t.Run("GetAllSuiteStatuses", func(t *testing.T) {
		store, endpoint1, _, _, _, testSuite := setupStore(t)
		// Insert test data
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 100 * time.Millisecond})
		timestamp := time.Now()
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: true,
			Timestamp: timestamp, Duration: 125 * time.Millisecond,
		})
		statuses, err := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if err != nil {
			t.Fatalf("failed to get all suite statuses: %v", err)
		}
		// Should have 1 suite
		if len(statuses) != 1 {
			t.Errorf("expected 1 suite status, got %d", len(statuses))
		}
		if len(statuses) > 0 {
			suiteStatus := statuses[0]
			if suiteStatus.Name != "test-suite" {
				t.Errorf("expected suite name 'test-suite', got '%s'", suiteStatus.Name)
			}
			if suiteStatus.Group != "suite-group" {
				t.Errorf("expected suite group 'suite-group', got '%s'", suiteStatus.Group)
			}
			if len(suiteStatus.Results) != 1 {
				t.Errorf("expected 1 suite result, got %d", len(suiteStatus.Results))
			}
			if len(suiteStatus.Results) > 0 {
				result := suiteStatus.Results[0]
				if !result.Success {
					t.Error("expected suite result to be successful")
				}
				if result.Duration != 125*time.Millisecond {
					t.Errorf("expected suite result duration 125ms, got %v", result.Duration)
				}
			}
		}
	})
	// Test 5: GetEndpointStatusByKey should work for all endpoints
	t.Run("GetEndpointStatusByKey", func(t *testing.T) {
		store, endpoint1, _, suiteEndpoint1, _, _ := setupStore(t)
		// Insert test data with specific timestamps and durations
		timestamp1 := time.Now()
		timestamp2 := time.Now().Add(1 * time.Hour)
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: timestamp1, Duration: 100 * time.Millisecond})
		store.InsertEndpointResult(suiteEndpoint1, &endpoint.Result{Success: false, Timestamp: timestamp2, Duration: 50 * time.Millisecond, Errors: []string{"suite error"}})
		// Test regular endpoints
		status1, err := store.GetEndpointStatusByKey(endpoint1.Key(), &paging.EndpointStatusParams{})
		if err != nil {
			t.Fatalf("failed to get endpoint1 status: %v", err)
		}
		if status1.Name != "endpoint1" {
			t.Errorf("expected endpoint1, got %s", status1.Name)
		}
		if status1.Group != "group1" {
			t.Errorf("expected group1, got %s", status1.Group)
		}
		if len(status1.Results) != 1 {
			t.Errorf("expected 1 result for endpoint1, got %d", len(status1.Results))
		}
		if len(status1.Results) > 0 {
			result := status1.Results[0]
			if !result.Success {
				t.Error("expected endpoint1 result to be successful")
			}
			if result.Duration != 100*time.Millisecond {
				t.Errorf("expected endpoint1 result duration 100ms, got %v", result.Duration)
			}
		}
		// Test suite endpoints
		suiteStatus1, err := store.GetEndpointStatusByKey(suiteEndpoint1.Key(), &paging.EndpointStatusParams{})
		if err != nil {
			t.Fatalf("failed to get suite endpoint1 status: %v", err)
		}
		if suiteStatus1.Name != "suite-endpoint1" {
			t.Errorf("expected suite-endpoint1, got %s", suiteStatus1.Name)
		}
		if suiteStatus1.Group != "suite-group" {
			t.Errorf("expected suite-group, got %s", suiteStatus1.Group)
		}
		if len(suiteStatus1.Results) != 1 {
			t.Errorf("expected 1 result for suite-endpoint1, got %d", len(suiteStatus1.Results))
		}
		if len(suiteStatus1.Results) > 0 {
			result := suiteStatus1.Results[0]
			if result.Success {
				t.Error("expected suite-endpoint1 result to be unsuccessful")
			}
			if result.Duration != 50*time.Millisecond {
				t.Errorf("expected suite-endpoint1 result duration 50ms, got %v", result.Duration)
			}
			if len(result.Errors) != 1 || result.Errors[0] != "suite error" {
				t.Errorf("expected suite-endpoint1 to have error 'suite error', got %v", result.Errors)
			}
		}
	})
	// Test 6: GetSuiteStatusByKey should work for suites
	t.Run("GetSuiteStatusByKey", func(t *testing.T) {
		store, _, _, _, _, testSuite := setupStore(t)
		// Insert suite result with endpoint results
		timestamp := time.Now()
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: false,
			Timestamp: timestamp, Duration: 125 * time.Millisecond,
			EndpointResults: []*endpoint.Result{
				{Success: true, Duration: 50 * time.Millisecond},
				{Success: false, Duration: 75 * time.Millisecond, Errors: []string{"endpoint failed"}},
			},
		})
		suiteStatus, err := store.GetSuiteStatusByKey(testSuite.Key(), &paging.SuiteStatusParams{})
		if err != nil {
			t.Fatalf("failed to get suite status: %v", err)
		}
		if suiteStatus.Name != "test-suite" {
			t.Errorf("expected test-suite, got %s", suiteStatus.Name)
		}
		if suiteStatus.Group != "suite-group" {
			t.Errorf("expected suite-group, got %s", suiteStatus.Group)
		}
		if len(suiteStatus.Results) != 1 {
			t.Errorf("expected 1 suite result, got %d", len(suiteStatus.Results))
		}
		if len(suiteStatus.Results) > 0 {
			result := suiteStatus.Results[0]
			if result.Success {
				t.Error("expected suite result to be unsuccessful")
			}
			if result.Duration != 125*time.Millisecond {
				t.Errorf("expected suite result duration 125ms, got %v", result.Duration)
			}
			if len(result.EndpointResults) != 2 {
				t.Errorf("expected 2 endpoint results, got %d", len(result.EndpointResults))
			}
			if len(result.EndpointResults) >= 2 {
				if !result.EndpointResults[0].Success {
					t.Error("expected first endpoint result to be successful")
				}
				if result.EndpointResults[1].Success {
					t.Error("expected second endpoint result to be unsuccessful")
				}
				if len(result.EndpointResults[1].Errors) != 1 || result.EndpointResults[1].Errors[0] != "endpoint failed" {
					t.Errorf("expected second endpoint to have error 'endpoint failed', got %v", result.EndpointResults[1].Errors)
				}
			}
		}
	})
	// Test 7: DeleteAllEndpointStatusesNotInKeys should not affect suites
	t.Run("DeleteEndpointsNotInKeys", func(t *testing.T) {
		store, endpoint1, endpoint2, suiteEndpoint1, suiteEndpoint2, testSuite := setupStore(t)
		// Insert all test data
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 100 * time.Millisecond})
		store.InsertEndpointResult(endpoint2, &endpoint.Result{Success: false, Timestamp: time.Now(), Duration: 200 * time.Millisecond})
		store.InsertEndpointResult(suiteEndpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 50 * time.Millisecond})
		store.InsertEndpointResult(suiteEndpoint2, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 75 * time.Millisecond})
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: true,
			Timestamp: time.Now(), Duration: 125 * time.Millisecond,
		})
		// Keep only endpoint1 and suite-endpoint1
		keysToKeep := []string{endpoint1.Key(), suiteEndpoint1.Key()}
		deleted := store.DeleteAllEndpointStatusesNotInKeys(keysToKeep)
		// Should have deleted 2 endpoints (endpoint2 and suite-endpoint2)
		if deleted != 2 {
			t.Errorf("expected to delete 2 endpoints, deleted %d", deleted)
		}
		// Verify remaining endpoints
		statuses, _ := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if len(statuses) != 2 {
			t.Errorf("expected 2 remaining endpoint statuses, got %d", len(statuses))
		}
		// Suite should still exist
		suiteStatuses, _ := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if len(suiteStatuses) != 1 {
			t.Errorf("suite should not be affected by DeleteAllEndpointStatusesNotInKeys")
		}
	})
	// Test 8: DeleteAllSuiteStatusesNotInKeys should not affect endpoints
	t.Run("DeleteSuitesNotInKeys", func(t *testing.T) {
		store, endpoint1, _, _, _, testSuite := setupStore(t)
		// Insert test data
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 100 * time.Millisecond})
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: true,
			Timestamp: time.Now(), Duration: 125 * time.Millisecond,
		})
		// First, add another suite to test deletion
		anotherSuite := &suite.Suite{
			Name:  "another-suite",
			Group: "another-group",
		}
		anotherSuiteResult := &suite.Result{
			Name:      anotherSuite.Name,
			Group:     anotherSuite.Group,
			Success:   true,
			Timestamp: time.Now(),
			Duration:  100 * time.Millisecond,
		}
		store.InsertSuiteResult(anotherSuite, anotherSuiteResult)
		// Keep only the original test-suite
		deleted := store.DeleteAllSuiteStatusesNotInKeys([]string{testSuite.Key()})
		// Should have deleted 1 suite (another-suite)
		if deleted != 1 {
			t.Errorf("expected to delete 1 suite, deleted %d", deleted)
		}
		// Endpoints should still exist
		endpointStatuses, _ := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if len(endpointStatuses) != 1 {
			t.Errorf("endpoints should not be affected by DeleteAllSuiteStatusesNotInKeys")
		}
		// Only one suite should remain
		suiteStatuses, _ := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if len(suiteStatuses) != 1 {
			t.Errorf("expected 1 remaining suite, got %d", len(suiteStatuses))
		}
	})
	// Test 9: Clear should remove everything
	t.Run("Clear", func(t *testing.T) {
		store, endpoint1, _, _, _, testSuite := setupStore(t)
		// Insert test data
		store.InsertEndpointResult(endpoint1, &endpoint.Result{Success: true, Timestamp: time.Now(), Duration: 100 * time.Millisecond})
		store.InsertSuiteResult(testSuite, &suite.Result{
			Name: testSuite.Name, Group: testSuite.Group, Success: true,
			Timestamp: time.Now(), Duration: 125 * time.Millisecond,
		})
		store.Clear()
		// No endpoints should remain
		endpointStatuses, _ := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if len(endpointStatuses) != 0 {
			t.Errorf("expected 0 endpoints after clear, got %d", len(endpointStatuses))
		}
		// No suites should remain
		suiteStatuses, _ := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if len(suiteStatuses) != 0 {
			t.Errorf("expected 0 suites after clear, got %d", len(suiteStatuses))
		}
	})
}
// TestStore_EndpointStatusCastingSafety tests that type assertions are safe
// when the underlying cache holds a mix of endpoint and suite statuses.
func TestStore_EndpointStatusCastingSafety(t *testing.T) {
	store, err := NewStore(100, 50)
	if err != nil {
		t.Fatal("expected no error, got", err)
	}
	// Insert an endpoint result
	ep := &endpoint.Endpoint{
		Name:  "test-endpoint",
		Group: "test",
		URL:   "https://example.com",
	}
	result := &endpoint.Result{
		Success:   true,
		Timestamp: time.Now(),
		Duration:  100 * time.Millisecond,
	}
	if err := store.InsertEndpointResult(ep, result); err != nil {
		t.Fatalf("failed to insert endpoint result: %v", err)
	}
	// Insert a suite result so the cache contains a second value type
	testSuite := &suite.Suite{
		Name:  "test-suite",
		Group: "test",
	}
	suiteResult := &suite.Result{
		Name:      testSuite.Name,
		Group:     testSuite.Group,
		Success:   true,
		Timestamp: time.Now(),
		Duration:  200 * time.Millisecond,
	}
	if err := store.InsertSuiteResult(testSuite, suiteResult); err != nil {
		t.Fatalf("failed to insert suite result: %v", err)
	}
	// This should not panic even with mixed types in cache
	statuses, err := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
	if err != nil {
		t.Fatalf("failed to get all endpoint statuses: %v", err)
	}
	// Should only have the endpoint, not the suite.
	// Fatalf (not Errorf) here: statuses[0] is dereferenced below, so
	// continuing with an empty slice would panic instead of failing cleanly.
	if len(statuses) != 1 {
		t.Fatalf("expected 1 endpoint status, got %d", len(statuses))
	}
	if statuses[0].Name != "test-endpoint" {
		t.Errorf("expected test-endpoint, got %s", statuses[0].Name)
	}
}
// TestStore_MaximumLimits verifies that both endpoint and suite results are
// trimmed down to the configured maximum, keeping only the newest entries.
func TestStore_MaximumLimits(t *testing.T) {
	// Deliberately tiny limits so that trimming kicks in after a few inserts
	const (
		maxResults = 5
		maxEvents  = 3
	)
	store, err := NewStore(maxResults, maxEvents)
	if err != nil {
		t.Fatal("expected no error, got", err)
	}
	defer store.Clear()
	t.Run("endpoint-result-limits", func(t *testing.T) {
		ep := &endpoint.Endpoint{Name: "test-endpoint", Group: "test", URL: "https://example.com"}
		// Insert twice as many results as the store is allowed to retain
		start := time.Now().Add(-10 * time.Hour)
		for i := 0; i < maxResults*2; i++ {
			r := &endpoint.Result{
				Success:   i%2 == 0,
				Timestamp: start.Add(time.Duration(i) * time.Hour),
				Duration:  time.Duration(i*10) * time.Millisecond,
			}
			if err := store.InsertEndpointResult(ep, r); err != nil {
				t.Fatalf("failed to insert result %d: %v", i, err)
			}
		}
		// Only maxResults entries should survive the trimming
		status, err := store.GetEndpointStatusByKey(ep.Key(), nil)
		if err != nil {
			t.Fatalf("failed to get endpoint status: %v", err)
		}
		if len(status.Results) != maxResults {
			t.Errorf("expected %d results after trimming, got %d", maxResults, len(status.Results))
		}
		if len(status.Results) > 0 {
			oldest := status.Results[0]
			newest := status.Results[len(status.Results)-1]
			// Results are appended in insertion order, so the slice must be chronological
			if !newest.Timestamp.After(oldest.Timestamp) {
				t.Error("expected results to be in chronological order")
			}
			// The surviving tail entry must be the very last result we inserted
			wantDuration := time.Duration((maxResults*2-1)*10) * time.Millisecond
			if newest.Duration != wantDuration {
				t.Errorf("expected last result duration %v, got %v", wantDuration, newest.Duration)
			}
		}
	})
	t.Run("suite-result-limits", func(t *testing.T) {
		testSuite := &suite.Suite{Name: "test-suite", Group: "test"}
		// Insert twice as many suite results as the store is allowed to retain
		start := time.Now().Add(-10 * time.Hour)
		for i := 0; i < maxResults*2; i++ {
			r := &suite.Result{
				Name:      testSuite.Name,
				Group:     testSuite.Group,
				Success:   i%2 == 0,
				Timestamp: start.Add(time.Duration(i) * time.Hour),
				Duration:  time.Duration(i*10) * time.Millisecond,
			}
			if err := store.InsertSuiteResult(testSuite, r); err != nil {
				t.Fatalf("failed to insert suite result %d: %v", i, err)
			}
		}
		// Only maxResults entries should survive the trimming
		status, err := store.GetSuiteStatusByKey(testSuite.Key(), &paging.SuiteStatusParams{})
		if err != nil {
			t.Fatalf("failed to get suite status: %v", err)
		}
		if len(status.Results) != maxResults {
			t.Errorf("expected %d results after trimming, got %d", maxResults, len(status.Results))
		}
		if len(status.Results) > 0 {
			oldest := status.Results[0]
			newest := status.Results[len(status.Results)-1]
			// Results are appended in insertion order, so the slice must be chronological
			if !newest.Timestamp.After(oldest.Timestamp) {
				t.Error("expected results to be in chronological order")
			}
			// The surviving tail entry must be the very last result we inserted
			wantDuration := time.Duration((maxResults*2-1)*10) * time.Millisecond
			if newest.Duration != wantDuration {
				t.Errorf("expected last result duration %v, got %v", wantDuration, newest.Duration)
			}
		}
	})
}
// TestSuiteResultOrdering verifies that suite results are stored in
// chronological (append) order, that pagination returns the newest page
// first, and that trimming preserves the newest entries.
func TestSuiteResultOrdering(t *testing.T) {
	store, err := NewStore(10, 5)
	if err != nil {
		t.Fatal("expected no error, got", err)
	}
	defer store.Clear()
	testSuite := &suite.Suite{Name: "ordering-suite", Group: "test"}
	// Insert results with distinct timestamps, one hour apart
	baseTime := time.Now().Add(-5 * time.Hour)
	timestamps := make([]time.Time, 5)
	for i := 0; i < 5; i++ {
		timestamp := baseTime.Add(time.Duration(i) * time.Hour)
		timestamps[i] = timestamp
		result := &suite.Result{
			Name:      testSuite.Name,
			Group:     testSuite.Group,
			Success:   true,
			Timestamp: timestamp,
			Duration:  time.Duration(i*100) * time.Millisecond,
		}
		err := store.InsertSuiteResult(testSuite, result)
		if err != nil {
			t.Fatalf("failed to insert result %d: %v", i, err)
		}
	}
	t.Run("chronological-append-order", func(t *testing.T) {
		status, err := store.GetSuiteStatusByKey(testSuite.Key(), nil)
		if err != nil {
			t.Fatalf("failed to get suite status: %v", err)
		}
		// Guard before indexing: the direct accesses below would panic on a short slice
		if len(status.Results) != len(timestamps) {
			t.Fatalf("expected %d results, got %d", len(timestamps), len(status.Results))
		}
		// Verify results are in chronological order (oldest first due to append)
		for i := 0; i < len(status.Results)-1; i++ {
			current := status.Results[i]
			next := status.Results[i+1]
			if !next.Timestamp.After(current.Timestamp) {
				t.Errorf("result %d timestamp %v should be before result %d timestamp %v",
					i, current.Timestamp, i+1, next.Timestamp)
			}
		}
		// Verify specific timestamp order
		if !status.Results[0].Timestamp.Equal(timestamps[0]) {
			t.Errorf("first result timestamp should be %v, got %v", timestamps[0], status.Results[0].Timestamp)
		}
		if !status.Results[len(status.Results)-1].Timestamp.Equal(timestamps[len(timestamps)-1]) {
			t.Errorf("last result timestamp should be %v, got %v", timestamps[len(timestamps)-1], status.Results[len(status.Results)-1].Timestamp)
		}
	})
	t.Run("pagination-newest-first", func(t *testing.T) {
		// Test reverse pagination (newest first in paginated results)
		page1 := ShallowCopySuiteStatus(
			&suite.Status{
				Name: testSuite.Name, Group: testSuite.Group, Key: testSuite.Key(),
				Results: []*suite.Result{
					{Timestamp: timestamps[0], Duration: 0 * time.Millisecond},
					{Timestamp: timestamps[1], Duration: 100 * time.Millisecond},
					{Timestamp: timestamps[2], Duration: 200 * time.Millisecond},
					{Timestamp: timestamps[3], Duration: 300 * time.Millisecond},
					{Timestamp: timestamps[4], Duration: 400 * time.Millisecond},
				},
			},
			paging.NewSuiteStatusParams().WithPagination(1, 3),
		)
		// Fatalf (not Errorf): page1.Results[0] and [2] are indexed right below
		if len(page1.Results) != 3 {
			t.Fatalf("expected 3 results in page 1, got %d", len(page1.Results))
		}
		// With reverse pagination, page 1 should have the 3 newest results
		// That means results[2], results[3], results[4] from original array
		if page1.Results[0].Duration != 200*time.Millisecond {
			t.Errorf("expected first result in page to have 200ms duration, got %v", page1.Results[0].Duration)
		}
		if page1.Results[2].Duration != 400*time.Millisecond {
			t.Errorf("expected last result in page to have 400ms duration, got %v", page1.Results[2].Duration)
		}
	})
	t.Run("trimming-preserves-newest", func(t *testing.T) {
		limitedStore, err := NewStore(3, 2) // Very small limits
		if err != nil {
			t.Fatal("expected no error, got", err)
		}
		defer limitedStore.Clear()
		smallSuite := &suite.Suite{Name: "small-suite", Group: "test"}
		// Insert 6 results, should keep only the newest 3
		for i := 0; i < 6; i++ {
			result := &suite.Result{
				Name:      smallSuite.Name,
				Group:     smallSuite.Group,
				Success:   true,
				Timestamp: baseTime.Add(time.Duration(i) * time.Hour),
				Duration:  time.Duration(i*50) * time.Millisecond,
			}
			err := limitedStore.InsertSuiteResult(smallSuite, result)
			if err != nil {
				t.Fatalf("failed to insert result %d: %v", i, err)
			}
		}
		status, err := limitedStore.GetSuiteStatusByKey(smallSuite.Key(), nil)
		if err != nil {
			t.Fatalf("failed to get suite status: %v", err)
		}
		// Fatalf (not Errorf): the loop below indexes status.Results[i] for i < 3
		if len(status.Results) != 3 {
			t.Fatalf("expected 3 results after trimming, got %d", len(status.Results))
		}
		// Should have results 3, 4, 5 (the newest ones)
		expectedDurations := []time.Duration{150 * time.Millisecond, 200 * time.Millisecond, 250 * time.Millisecond}
		for i, expectedDuration := range expectedDurations {
			if status.Results[i].Duration != expectedDuration {
				t.Errorf("result %d should have duration %v, got %v", i, expectedDuration, status.Results[i].Duration)
			}
		}
	})
}
// TestStore_ConcurrentAccess exercises the store under concurrent writers and
// readers for both endpoints and suites, then verifies the final state.
// Failures inside goroutines use t.Errorf, which is safe for concurrent use.
func TestStore_ConcurrentAccess(t *testing.T) {
	store, err := NewStore(100, 50)
	if err != nil {
		t.Fatal("expected no error, got", err)
	}
	defer store.Clear()
	t.Run("concurrent-endpoint-insertions", func(t *testing.T) {
		const (
			numGoroutines       = 10
			resultsPerGoroutine = 5
		)
		// One endpoint per goroutine so writers never collide on a key
		endpoints := make([]*endpoint.Endpoint, 0, numGoroutines)
		for i := 0; i < numGoroutines; i++ {
			suffix := string(rune('A' + i))
			endpoints = append(endpoints, &endpoint.Endpoint{
				Name:  "endpoint-" + suffix,
				Group: "concurrent",
				URL:   "https://example.com/" + suffix,
			})
		}
		// Fan out: each goroutine writes results for its own endpoint
		var wg sync.WaitGroup
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(endpointIndex int) {
				defer wg.Done()
				for j := 0; j < resultsPerGoroutine; j++ {
					r := &endpoint.Result{
						Success:   j%2 == 0,
						Timestamp: time.Now().Add(time.Duration(j) * time.Minute),
						Duration:  time.Duration(j*10) * time.Millisecond,
					}
					if err := store.InsertEndpointResult(endpoints[endpointIndex], r); err != nil {
						t.Errorf("failed to insert result for endpoint %d: %v", endpointIndex, err)
					}
				}
			}(i)
		}
		wg.Wait()
		// Every endpoint must exist and hold exactly resultsPerGoroutine results
		statuses, err := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if err != nil {
			t.Fatalf("failed to get all endpoint statuses: %v", err)
		}
		if len(statuses) != numGoroutines {
			t.Errorf("expected %d endpoint statuses, got %d", numGoroutines, len(statuses))
		}
		for _, status := range statuses {
			if len(status.Results) != resultsPerGoroutine {
				t.Errorf("endpoint %s should have %d results, got %d", status.Name, resultsPerGoroutine, len(status.Results))
			}
		}
	})
	t.Run("concurrent-suite-insertions", func(t *testing.T) {
		const (
			numGoroutines       = 5
			resultsPerGoroutine = 3
		)
		// One suite per goroutine so writers never collide on a key
		suites := make([]*suite.Suite, 0, numGoroutines)
		for i := 0; i < numGoroutines; i++ {
			suites = append(suites, &suite.Suite{
				Name:  "suite-" + string(rune('A'+i)),
				Group: "concurrent",
			})
		}
		// Fan out: each goroutine writes results for its own suite
		var wg sync.WaitGroup
		for i := 0; i < numGoroutines; i++ {
			wg.Add(1)
			go func(suiteIndex int) {
				defer wg.Done()
				su := suites[suiteIndex]
				for j := 0; j < resultsPerGoroutine; j++ {
					r := &suite.Result{
						Name:      su.Name,
						Group:     su.Group,
						Success:   j%2 == 0,
						Timestamp: time.Now().Add(time.Duration(j) * time.Minute),
						Duration:  time.Duration(j*50) * time.Millisecond,
					}
					if err := store.InsertSuiteResult(su, r); err != nil {
						t.Errorf("failed to insert result for suite %d: %v", suiteIndex, err)
					}
				}
			}(i)
		}
		wg.Wait()
		// Every suite must exist and hold exactly resultsPerGoroutine results
		statuses, err := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if err != nil {
			t.Fatalf("failed to get all suite statuses: %v", err)
		}
		if len(statuses) != numGoroutines {
			t.Errorf("expected %d suite statuses, got %d", numGoroutines, len(statuses))
		}
		for _, status := range statuses {
			if len(status.Results) != resultsPerGoroutine {
				t.Errorf("suite %s should have %d results, got %d", status.Name, resultsPerGoroutine, len(status.Results))
			}
		}
	})
	t.Run("concurrent-mixed-operations", func(t *testing.T) {
		ep := &endpoint.Endpoint{Name: "mixed-endpoint", Group: "test", URL: "https://example.com"}
		testSuite := &suite.Suite{Name: "mixed-suite", Group: "test"}
		// Three goroutines: one endpoint writer, one suite writer, one reader
		var wg sync.WaitGroup
		wg.Add(3)
		go func() {
			defer wg.Done()
			for i := 0; i < 5; i++ {
				store.InsertEndpointResult(ep, &endpoint.Result{
					Success:   true,
					Timestamp: time.Now(),
					Duration:  time.Duration(i*10) * time.Millisecond,
				})
			}
		}()
		go func() {
			defer wg.Done()
			for i := 0; i < 5; i++ {
				store.InsertSuiteResult(testSuite, &suite.Result{
					Name:      testSuite.Name,
					Group:     testSuite.Group,
					Success:   true,
					Timestamp: time.Now(),
					Duration:  time.Duration(i*20) * time.Millisecond,
				})
			}
		}()
		go func() {
			defer wg.Done()
			for i := 0; i < 10; i++ {
				store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
				store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
				time.Sleep(1 * time.Millisecond)
			}
		}()
		wg.Wait()
		// The final state must be consistent: both the endpoint and the suite exist
		endpointStatuses, err := store.GetAllEndpointStatuses(&paging.EndpointStatusParams{})
		if err != nil {
			t.Fatalf("failed to get endpoint statuses after concurrent operations: %v", err)
		}
		if len(endpointStatuses) == 0 {
			t.Error("expected at least one endpoint status after concurrent operations")
		}
		suiteStatuses, err := store.GetAllSuiteStatuses(&paging.SuiteStatusParams{})
		if err != nil {
			t.Fatalf("failed to get suite statuses after concurrent operations: %v", err)
		}
		if len(suiteStatuses) == 0 {
			t.Error("expected at least one suite status after concurrent operations")
		}
	})
}

View File

@@ -2,6 +2,7 @@ package memory
import (
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)
@@ -14,19 +15,46 @@ func ShallowCopyEndpointStatus(ss *endpoint.Status, params *paging.EndpointStatu
Key: ss.Key,
Uptime: endpoint.NewUptime(),
}
numberOfResults := len(ss.Results)
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
if resultsStart < 0 || resultsEnd < 0 {
shallowCopy.Results = []*endpoint.Result{}
if params == nil || (params.ResultsPage == 0 && params.ResultsPageSize == 0 && params.EventsPage == 0 && params.EventsPageSize == 0) {
shallowCopy.Results = ss.Results
shallowCopy.Events = ss.Events
} else {
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
numberOfResults := len(ss.Results)
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
if resultsStart < 0 || resultsEnd < 0 {
shallowCopy.Results = []*endpoint.Result{}
} else {
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
}
numberOfEvents := len(ss.Events)
eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
if eventsStart < 0 || eventsEnd < 0 {
shallowCopy.Events = []*endpoint.Event{}
} else {
shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
}
}
numberOfEvents := len(ss.Events)
eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
if eventsStart < 0 || eventsEnd < 0 {
shallowCopy.Events = []*endpoint.Event{}
return shallowCopy
}
// ShallowCopySuiteStatus returns a shallow copy of a suite Status with only the results
// within the range defined by the page and pageSize parameters
func ShallowCopySuiteStatus(ss *suite.Status, params *paging.SuiteStatusParams) *suite.Status {
shallowCopy := &suite.Status{
Name: ss.Name,
Group: ss.Group,
Key: ss.Key,
}
if params == nil || (params.Page == 0 && params.PageSize == 0) {
shallowCopy.Results = ss.Results
} else {
shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
numberOfResults := len(ss.Results)
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.Page, params.PageSize)
if resultsStart < 0 || resultsEnd < 0 {
shallowCopy.Results = []*suite.Result{}
} else {
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
}
}
return shallowCopy
}

View File

@@ -5,6 +5,7 @@ import (
"time"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
)
@@ -64,3 +65,108 @@ func TestShallowCopyEndpointStatus(t *testing.T) {
t.Error("expected to have 25 results, because there's only 25 results")
}
}
func TestShallowCopySuiteStatus(t *testing.T) {
testSuite := &suite.Suite{Name: "test-suite", Group: "test-group"}
suiteStatus := &suite.Status{
Name: testSuite.Name,
Group: testSuite.Group,
Key: testSuite.Key(),
Results: []*suite.Result{},
}
ts := time.Now().Add(-25 * time.Hour)
for i := 0; i < 25; i++ {
result := &suite.Result{
Name: testSuite.Name,
Group: testSuite.Group,
Success: i%2 == 0,
Timestamp: ts,
Duration: time.Duration(i*10) * time.Millisecond,
}
suiteStatus.Results = append(suiteStatus.Results, result)
ts = ts.Add(time.Hour)
}
t.Run("invalid-page-negative", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(-1, 10))
if len(result.Results) != 0 {
t.Errorf("expected 0 results for negative page, got %d", len(result.Results))
}
})
t.Run("invalid-page-zero", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(0, 10))
if len(result.Results) != 0 {
t.Errorf("expected 0 results for zero page, got %d", len(result.Results))
}
})
t.Run("invalid-pagesize-negative", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(1, -1))
if len(result.Results) != 0 {
t.Errorf("expected 0 results for negative page size, got %d", len(result.Results))
}
})
t.Run("zero-pagesize", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(1, 0))
if len(result.Results) != 0 {
t.Errorf("expected 0 results for zero page size, got %d", len(result.Results))
}
})
t.Run("nil-params", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, nil)
if len(result.Results) != 25 {
t.Errorf("expected 25 results for nil params, got %d", len(result.Results))
}
})
t.Run("zero-params", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, &paging.SuiteStatusParams{Page: 0, PageSize: 0})
if len(result.Results) != 25 {
t.Errorf("expected 25 results for zero-value params, got %d", len(result.Results))
}
})
t.Run("first-page", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(1, 10))
if len(result.Results) != 10 {
t.Errorf("expected 10 results for page 1, size 10, got %d", len(result.Results))
}
// Verify newest results are returned (reverse pagination)
if len(result.Results) > 0 && !result.Results[len(result.Results)-1].Timestamp.After(result.Results[0].Timestamp) {
t.Error("expected newest result to be at the end")
}
})
t.Run("second-page", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(2, 10))
if len(result.Results) != 10 {
t.Errorf("expected 10 results for page 2, size 10, got %d", len(result.Results))
}
})
t.Run("last-partial-page", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(3, 10))
if len(result.Results) != 5 {
t.Errorf("expected 5 results for page 3, size 10, got %d", len(result.Results))
}
})
t.Run("beyond-available-pages", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(4, 10))
if len(result.Results) != 0 {
t.Errorf("expected 0 results for page beyond available data, got %d", len(result.Results))
}
})
t.Run("large-page-size", func(t *testing.T) {
result := ShallowCopySuiteStatus(suiteStatus, paging.NewSuiteStatusParams().WithPagination(1, 100))
if len(result.Results) != 25 {
t.Errorf("expected 25 results for large page size, got %d", len(result.Results))
}
})
}

View File

@@ -38,7 +38,8 @@ func (s *Store) createPostgresSchema() error {
hostname TEXT NOT NULL,
ip TEXT NOT NULL,
duration BIGINT NOT NULL,
timestamp TIMESTAMP NOT NULL
timestamp TIMESTAMP NOT NULL,
suite_result_id BIGINT REFERENCES suite_results(suite_result_id) ON DELETE CASCADE
)
`)
if err != nil {
@@ -79,7 +80,44 @@ func (s *Store) createPostgresSchema() error {
UNIQUE(endpoint_id, configuration_checksum)
)
`)
if err != nil {
return err
}
// Create suite tables
_, err = s.db.Exec(`
CREATE TABLE IF NOT EXISTS suites (
suite_id BIGSERIAL PRIMARY KEY,
suite_key TEXT UNIQUE,
suite_name TEXT NOT NULL,
suite_group TEXT NOT NULL,
UNIQUE(suite_name, suite_group)
)
`)
if err != nil {
return err
}
_, err = s.db.Exec(`
CREATE TABLE IF NOT EXISTS suite_results (
suite_result_id BIGSERIAL PRIMARY KEY,
suite_id BIGINT NOT NULL REFERENCES suites(suite_id) ON DELETE CASCADE,
success BOOLEAN NOT NULL,
errors TEXT NOT NULL,
duration BIGINT NOT NULL,
timestamp TIMESTAMP NOT NULL
)
`)
if err != nil {
return err
}
// Create index for suite_results
_, err = s.db.Exec(`
CREATE INDEX IF NOT EXISTS suite_results_suite_id_idx ON suite_results (suite_id);
`)
// Silent table modifications TODO: Remove this in v6.0.0
_, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD IF NOT EXISTS domain_expiration BIGINT NOT NULL DEFAULT 0`)
// Add suite_result_id to endpoint_results table for suite endpoint linkage
_, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD COLUMN IF NOT EXISTS suite_result_id BIGINT REFERENCES suite_results(suite_result_id) ON DELETE CASCADE`)
// Create index for suite_result_id
_, _ = s.db.Exec(`CREATE INDEX IF NOT EXISTS endpoint_results_suite_result_id_idx ON endpoint_results(suite_result_id)`)
return err
}

View File

@@ -38,7 +38,8 @@ func (s *Store) createSQLiteSchema() error {
hostname TEXT NOT NULL,
ip TEXT NOT NULL,
duration INTEGER NOT NULL,
timestamp TIMESTAMP NOT NULL
timestamp TIMESTAMP NOT NULL,
suite_result_id INTEGER REFERENCES suite_results(suite_result_id) ON DELETE CASCADE
)
`)
if err != nil {
@@ -82,6 +83,32 @@ func (s *Store) createSQLiteSchema() error {
if err != nil {
return err
}
// Create suite tables
_, err = s.db.Exec(`
CREATE TABLE IF NOT EXISTS suites (
suite_id INTEGER PRIMARY KEY,
suite_key TEXT UNIQUE,
suite_name TEXT NOT NULL,
suite_group TEXT NOT NULL,
UNIQUE(suite_name, suite_group)
)
`)
if err != nil {
return err
}
_, err = s.db.Exec(`
CREATE TABLE IF NOT EXISTS suite_results (
suite_result_id INTEGER PRIMARY KEY,
suite_id INTEGER NOT NULL REFERENCES suites(suite_id) ON DELETE CASCADE,
success INTEGER NOT NULL,
errors TEXT NOT NULL,
duration INTEGER NOT NULL,
timestamp TIMESTAMP NOT NULL
)
`)
if err != nil {
return err
}
// Create indices for performance reasons
_, err = s.db.Exec(`
CREATE INDEX IF NOT EXISTS endpoint_results_endpoint_id_idx ON endpoint_results (endpoint_id);
@@ -98,7 +125,23 @@ func (s *Store) createSQLiteSchema() error {
_, err = s.db.Exec(`
CREATE INDEX IF NOT EXISTS endpoint_result_conditions_endpoint_result_id_idx ON endpoint_result_conditions (endpoint_result_id);
`)
if err != nil {
return err
}
// Create index for suite_results
_, err = s.db.Exec(`
CREATE INDEX IF NOT EXISTS suite_results_suite_id_idx ON suite_results (suite_id);
`)
if err != nil {
return err
}
// Silent table modifications TODO: Remove this in v6.0.0
_, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD domain_expiration INTEGER NOT NULL DEFAULT 0`)
// Add suite_result_id to endpoint_results table for suite endpoint linkage
_, _ = s.db.Exec(`ALTER TABLE endpoint_results ADD suite_result_id INTEGER REFERENCES suite_results(suite_result_id) ON DELETE CASCADE`)
// Create index for suite_result_id
_, _ = s.db.Exec(`CREATE INDEX IF NOT EXISTS endpoint_results_suite_result_id_idx ON endpoint_results(suite_result_id)`)
// Note: SQLite doesn't support DROP COLUMN in older versions, so we skip this cleanup
// The suite_id column in endpoints table will remain but unused
return err
}

View File

@@ -10,6 +10,8 @@ import (
"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/key"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage/store/common"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gocache/v2"
@@ -138,7 +140,7 @@ func (s *Store) GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*
// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
func (s *Store) GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error) {
return s.GetEndpointStatusByKey(endpoint.ConvertGroupAndEndpointNameToKey(groupName, endpointName), params)
return s.GetEndpointStatusByKey(key.ConvertGroupAndNameToKey(groupName, endpointName), params)
}
// GetEndpointStatusByKey returns the endpoint status for a given key
@@ -233,8 +235,8 @@ func (s *Store) GetHourlyAverageResponseTimeByKey(key string, from, to time.Time
return hourlyAverageResponseTimes, nil
}
// Insert adds the observed result for the specified endpoint into the store
func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
// InsertEndpointResult adds the observed result for the specified endpoint into the store
func (s *Store) InsertEndpointResult(ep *endpoint.Endpoint, result *endpoint.Result) error {
tx, err := s.db.Begin()
if err != nil {
return err
@@ -245,12 +247,12 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
// Endpoint doesn't exist in the database, insert it
if endpointID, err = s.insertEndpoint(tx, ep); err != nil {
_ = tx.Rollback()
logr.Errorf("[sql.Insert] Failed to create endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to create endpoint with key=%s: %s", ep.Key(), err.Error())
return err
}
} else {
_ = tx.Rollback()
logr.Errorf("[sql.Insert] Failed to retrieve id of endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve id of endpoint with key=%s: %s", ep.Key(), err.Error())
return err
}
}
@@ -266,7 +268,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
numberOfEvents, err := s.getNumberOfEventsByEndpointID(tx, endpointID)
if err != nil {
// Silently fail
logr.Errorf("[sql.Insert] Failed to retrieve total number of events for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve total number of events for endpoint with key=%s: %s", ep.Key(), err.Error())
}
if numberOfEvents == 0 {
// There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event
@@ -276,18 +278,18 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
})
if err != nil {
// Silently fail
logr.Errorf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", endpoint.EventStart, ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to insert event=%s for endpoint with key=%s: %s", endpoint.EventStart, ep.Key(), err.Error())
}
event := endpoint.NewEventFromResult(result)
if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
// Silently fail
logr.Errorf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
}
} else {
// Get the success value of the previous result
var lastResultSuccess bool
if lastResultSuccess, err = s.getLastEndpointResultSuccessValue(tx, endpointID); err != nil {
logr.Errorf("[sql.Insert] Failed to retrieve outcome of previous result for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve outcome of previous result for endpoint with key=%s: %s", ep.Key(), err.Error())
} else {
// If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
// If the final outcome (success or failure) of the previous and the new result aren't the same, it means
@@ -297,7 +299,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
event := endpoint.NewEventFromResult(result)
if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
// Silently fail
logr.Errorf("[sql.Insert] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to insert event=%s for endpoint with key=%s: %s", event.Type, ep.Key(), err.Error())
}
}
}
@@ -306,42 +308,42 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
// (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
if numberOfEvents > int64(s.maximumNumberOfEvents+eventsAboveMaximumCleanUpThreshold) {
if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil {
logr.Errorf("[sql.Insert] Failed to delete old events for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to delete old events for endpoint with key=%s: %s", ep.Key(), err.Error())
}
}
}
// Second, we need to insert the result.
if err = s.insertEndpointResult(tx, endpointID, result); err != nil {
logr.Errorf("[sql.Insert] Failed to insert result for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to insert result for endpoint with key=%s: %s", ep.Key(), err.Error())
_ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
return err
}
// Clean up old results
numberOfResults, err := s.getNumberOfResultsByEndpointID(tx, endpointID)
if err != nil {
logr.Errorf("[sql.Insert] Failed to retrieve total number of results for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve total number of results for endpoint with key=%s: %s", ep.Key(), err.Error())
} else {
if numberOfResults > int64(s.maximumNumberOfResults+resultsAboveMaximumCleanUpThreshold) {
if err = s.deleteOldEndpointResults(tx, endpointID); err != nil {
logr.Errorf("[sql.Insert] Failed to delete old results for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to delete old results for endpoint with key=%s: %s", ep.Key(), err.Error())
}
}
}
// Finally, we need to insert the uptime data.
// Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
if err = s.updateEndpointUptime(tx, endpointID, result); err != nil {
logr.Errorf("[sql.Insert] Failed to update uptime for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to update uptime for endpoint with key=%s: %s", ep.Key(), err.Error())
}
// Merge hourly uptime entries that can be merged into daily entries and clean up old uptime entries
numberOfUptimeEntries, err := s.getNumberOfUptimeEntriesByEndpointID(tx, endpointID)
if err != nil {
logr.Errorf("[sql.Insert] Failed to retrieve total number of uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve total number of uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
} else {
// Merge older hourly uptime entries into daily uptime entries if we have more than uptimeTotalEntriesMergeThreshold
if numberOfUptimeEntries >= uptimeTotalEntriesMergeThreshold {
logr.Infof("[sql.Insert] Merging hourly uptime entries for endpoint with key=%s; This is a lot of work, it shouldn't happen too often", ep.Key())
logr.Infof("[sql.InsertEndpointResult] Merging hourly uptime entries for endpoint with key=%s; This is a lot of work, it shouldn't happen too often", ep.Key())
if err = s.mergeHourlyUptimeEntriesOlderThanMergeThresholdIntoDailyUptimeEntries(tx, endpointID); err != nil {
logr.Errorf("[sql.Insert] Failed to merge hourly uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to merge hourly uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
}
}
}
@@ -350,11 +352,11 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
// but if Gatus was temporarily shut down, we might have some old entries that need to be cleaned up
ageOfOldestUptimeEntry, err := s.getAgeOfOldestEndpointUptimeEntry(tx, endpointID)
if err != nil {
logr.Errorf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to retrieve oldest endpoint uptime entry for endpoint with key=%s: %s", ep.Key(), err.Error())
} else {
if ageOfOldestUptimeEntry > uptimeAgeCleanUpThreshold {
if err = s.deleteOldUptimeEntries(tx, endpointID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
logr.Errorf("[sql.Insert] Failed to delete old uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
logr.Errorf("[sql.InsertEndpointResult] Failed to delete old uptime entries for endpoint with key=%s: %s", ep.Key(), err.Error())
}
}
}
@@ -364,7 +366,7 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
s.writeThroughCache.Delete(cacheKey)
endpointKey, params, err := extractKeyAndParamsFromCacheKey(cacheKey)
if err != nil {
logr.Errorf("[sql.Insert] Silently deleting cache key %s instead of refreshing due to error: %s", cacheKey, err.Error())
logr.Errorf("[sql.InsertEndpointResult] Silently deleting cache key %s instead of refreshing due to error: %s", cacheKey, err.Error())
continue
}
// Retrieve the endpoint status by key, which will in turn refresh the cache
@@ -379,17 +381,43 @@ func (s *Store) Insert(ep *endpoint.Endpoint, result *endpoint.Result) error {
// DeleteAllEndpointStatusesNotInKeys removes all rows owned by an endpoint whose key is not within the keys provided
func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
logr.Debugf("[sql.DeleteAllEndpointStatusesNotInKeys] Called with %d keys", len(keys))
var err error
var result sql.Result
if len(keys) == 0 {
// Delete everything
logr.Debugf("[sql.DeleteAllEndpointStatusesNotInKeys] No keys provided, deleting all endpoints")
result, err = s.db.Exec("DELETE FROM endpoints")
} else {
// First check what we're about to delete
args := make([]interface{}, 0, len(keys))
checkQuery := "SELECT endpoint_key FROM endpoints WHERE endpoint_key NOT IN ("
for i := range keys {
checkQuery += fmt.Sprintf("$%d,", i+1)
args = append(args, keys[i])
}
checkQuery = checkQuery[:len(checkQuery)-1] + ")"
rows, checkErr := s.db.Query(checkQuery, args...)
if checkErr == nil {
defer rows.Close()
var deletedKeys []string
for rows.Next() {
var key string
if err := rows.Scan(&key); err == nil {
deletedKeys = append(deletedKeys, key)
}
}
if len(deletedKeys) > 0 {
logr.Infof("[sql.DeleteAllEndpointStatusesNotInKeys] Deleting endpoints with keys: %v", deletedKeys)
} else {
logr.Debugf("[sql.DeleteAllEndpointStatusesNotInKeys] No endpoints to delete")
}
}
query := "DELETE FROM endpoints WHERE endpoint_key NOT IN ("
for i := range keys {
query += fmt.Sprintf("$%d,", i+1)
args = append(args, keys[i])
}
query = query[:len(query)-1] + ")" // Remove the last comma and add the closing parenthesis
result, err = s.db.Exec(query, args...)
@@ -586,11 +614,16 @@ func (s *Store) insertEndpointEvent(tx *sql.Tx, endpointID int64, event *endpoin
// insertEndpointResult inserts a result in the store
func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *endpoint.Result) error {
return s.insertEndpointResultWithSuiteID(tx, endpointID, result, nil)
}
// insertEndpointResultWithSuiteID inserts a result in the store with optional suite linkage
func (s *Store) insertEndpointResultWithSuiteID(tx *sql.Tx, endpointID int64, result *endpoint.Result, suiteResultID *int64) error {
var endpointResultID int64
err := tx.QueryRow(
`
INSERT INTO endpoint_results (endpoint_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
INSERT INTO endpoint_results (endpoint_id, success, errors, connected, status, dns_rcode, certificate_expiration, domain_expiration, hostname, ip, duration, timestamp, suite_result_id)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
RETURNING endpoint_result_id
`,
endpointID,
@@ -605,6 +638,7 @@ func (s *Store) insertEndpointResult(tx *sql.Tx, endpointID int64, result *endpo
result.IP,
result.Duration,
result.Timestamp.UTC(),
suiteResultID,
).Scan(&endpointResultID)
if err != nil {
return err
@@ -652,7 +686,16 @@ func (s *Store) updateEndpointUptime(tx *sql.Tx, endpointID int64, result *endpo
}
func (s *Store) getAllEndpointKeys(tx *sql.Tx) (keys []string, err error) {
rows, err := tx.Query("SELECT endpoint_key FROM endpoints ORDER BY endpoint_key")
// Only get endpoints that have at least one result not linked to a suite
// This excludes endpoints that only exist as part of suites
// Using JOIN for better performance than EXISTS subquery
rows, err := tx.Query(`
SELECT DISTINCT e.endpoint_key
FROM endpoints e
INNER JOIN endpoint_results er ON e.endpoint_id = er.endpoint_id
WHERE er.suite_result_id IS NULL
ORDER BY e.endpoint_key
`)
if err != nil {
return nil, err
}
@@ -1108,3 +1151,428 @@ func extractKeyAndParamsFromCacheKey(cacheKey string) (string, *paging.EndpointS
}
return strings.Join(parts[:len(parts)-4], "-"), params, nil
}
// GetAllSuiteStatuses returns all monitored suite statuses, each populated
// with a page of its most recent results.
//
// Errors encountered while fetching an individual suite's results are logged
// and skipped (the suite is still returned with empty/nil results) so that
// one bad suite does not hide all the others.
func (s *Store) GetAllSuiteStatuses(params *paging.SuiteStatusParams) ([]*suite.Status, error) {
	tx, err := s.db.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	// Resolve pagination once, with sane defaults when params is nil or partial
	page, pageSize := 1, 20
	if params != nil {
		if params.Page > 0 {
			page = params.Page
		}
		if params.PageSize > 0 {
			pageSize = params.PageSize
		}
	}
	rows, err := tx.Query(`
		SELECT suite_id, suite_key, suite_name, suite_group
		FROM suites
		ORDER BY suite_key
	`)
	if err != nil {
		return nil, err
	}
	// Fully consume and close the suites result set BEFORE issuing the
	// per-suite result queries on the same transaction: some drivers do not
	// support multiple concurrently active result sets on one connection.
	type suiteRow struct {
		id               int64
		key, name, group string
	}
	var suiteRows []suiteRow
	for rows.Next() {
		var sr suiteRow
		if err = rows.Scan(&sr.id, &sr.key, &sr.name, &sr.group); err != nil {
			rows.Close()
			return nil, err
		}
		suiteRows = append(suiteRows, sr)
	}
	rows.Close()
	// Surface iteration errors that rows.Next() may have swallowed silently
	if err = rows.Err(); err != nil {
		return nil, err
	}
	var suiteStatuses []*suite.Status
	for _, sr := range suiteRows {
		status := &suite.Status{
			Name:    sr.name,
			Group:   sr.group,
			Key:     sr.key,
			Results: []*suite.Result{},
		}
		status.Results, err = s.getSuiteResults(tx, sr.id, page, pageSize)
		if err != nil {
			// Best-effort: log and keep going with the remaining suites
			logr.Errorf("[sql.GetAllSuiteStatuses] Failed to retrieve results for suite_id=%d: %s", sr.id, err.Error())
		}
		// Each result carries the suite's display name and group
		for _, result := range status.Results {
			result.Name = sr.name
			result.Group = sr.group
		}
		suiteStatuses = append(suiteStatuses, status)
	}
	if err = tx.Commit(); err != nil {
		return nil, err
	}
	return suiteStatuses, nil
}
// GetSuiteStatusByKey returns the suite status for a given key.
//
// Returns (nil, nil) when no suite with the given key exists; any other
// database failure is returned as an error.
func (s *Store) GetSuiteStatusByKey(key string, params *paging.SuiteStatusParams) (*suite.Status, error) {
	tx, err := s.db.Begin()
	if err != nil {
		return nil, err
	}
	defer tx.Rollback()
	var (
		suiteID     int64
		name, group string
	)
	row := tx.QueryRow(`
		SELECT suite_id, suite_name, suite_group
		FROM suites
		WHERE suite_key = $1
	`, key)
	if err = row.Scan(&suiteID, &name, &group); err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// Unknown key: not an error, just nothing to report
			return nil, nil
		}
		return nil, err
	}
	// Pagination defaults, overridden by any positive values in params
	page, pageSize := 1, 20
	if params != nil {
		if params.Page > 0 {
			page = params.Page
		}
		if params.PageSize > 0 {
			pageSize = params.PageSize
		}
	}
	status := &suite.Status{
		Name:    name,
		Group:   group,
		Key:     key,
		Results: []*suite.Result{},
	}
	// Best-effort: a failure to load results still returns the suite itself
	status.Results, err = s.getSuiteResults(tx, suiteID, page, pageSize)
	if err != nil {
		logr.Errorf("[sql.GetSuiteStatusByKey] Failed to retrieve results for suite_id=%d: %s", suiteID, err.Error())
	}
	// Each result carries the suite's display name and group
	for _, result := range status.Results {
		result.Name = name
		result.Group = group
	}
	if err = tx.Commit(); err != nil {
		return nil, err
	}
	return status, nil
}
// InsertSuiteResult adds the observed result for the specified suite into the store.
//
// Within a single transaction it: creates the suite row on first sight,
// inserts the suite result, stores each step's endpoint result linked to the
// suite result via suite_result_id, and finally prunes old suite results once
// the retained count exceeds the configured maximum plus the clean-up
// threshold. Per-endpoint failures are logged and skipped so one bad step
// does not lose the whole suite result.
func (s *Store) InsertSuiteResult(su *suite.Suite, result *suite.Result) error {
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback()
	// Get or create suite
	suiteID, err := s.getSuiteID(tx, su)
	if err != nil {
		if errors.Is(err, common.ErrSuiteNotFound) {
			// Suite doesn't exist in the database, insert it
			if suiteID, err = s.insertSuite(tx, su); err != nil {
				logr.Errorf("[sql.InsertSuiteResult] Failed to create suite with key=%s: %s", su.Key(), err.Error())
				return err
			}
		} else {
			logr.Errorf("[sql.InsertSuiteResult] Failed to retrieve id of suite with key=%s: %s", su.Key(), err.Error())
			return err
		}
	}
	// Insert suite result; errors are flattened into a single column using arraySeparator
	var suiteResultID int64
	err = tx.QueryRow(`
		INSERT INTO suite_results (suite_id, success, errors, duration, timestamp)
		VALUES ($1, $2, $3, $4, $5)
		RETURNING suite_result_id
	`,
		suiteID,
		result.Success,
		strings.Join(result.Errors, arraySeparator),
		result.Duration.Nanoseconds(),
		result.Timestamp.UTC(), // timestamp is the start time
	).Scan(&suiteResultID)
	if err != nil {
		return err
	}
	// For each endpoint result in the suite, we need to store them
	for _, epResult := range result.EndpointResults {
		// Create a temporary endpoint object for storage
		ep := &endpoint.Endpoint{
			Name:  epResult.Name,
			Group: su.Group, // suite steps inherit the suite's group
		}
		// Get or create the endpoint (without suite linkage in endpoints table)
		epID, err := s.getEndpointID(tx, ep)
		if err != nil {
			if errors.Is(err, common.ErrEndpointNotFound) {
				// Endpoint doesn't exist, create it
				if epID, err = s.insertEndpoint(tx, ep); err != nil {
					logr.Errorf("[sql.InsertSuiteResult] Failed to create endpoint %s: %s", epResult.Name, err.Error())
					continue
				}
			} else {
				logr.Errorf("[sql.InsertSuiteResult] Failed to get endpoint %s: %s", epResult.Name, err.Error())
				continue
			}
		}
		// Insert the endpoint result with suite linkage (suite_result_id)
		err = s.insertEndpointResultWithSuiteID(tx, epID, epResult, &suiteResultID)
		if err != nil {
			// Best-effort: a single failed step result does not abort the transaction
			logr.Errorf("[sql.InsertSuiteResult] Failed to insert endpoint result for %s: %s", epResult.Name, err.Error())
		}
	}
	// Clean up old suite results once we exceed maximum + threshold, so the
	// (relatively expensive) deletion doesn't run on every single insert
	numberOfResults, err := s.getNumberOfSuiteResultsByID(tx, suiteID)
	if err != nil {
		logr.Errorf("[sql.InsertSuiteResult] Failed to retrieve total number of results for suite with key=%s: %s", su.Key(), err.Error())
	} else {
		if numberOfResults > int64(s.maximumNumberOfResults+resultsAboveMaximumCleanUpThreshold) {
			if err = s.deleteOldSuiteResults(tx, suiteID); err != nil {
				logr.Errorf("[sql.InsertSuiteResult] Failed to delete old results for suite with key=%s: %s", su.Key(), err.Error())
			}
		}
	}
	if err = tx.Commit(); err != nil {
		return err
	}
	return nil
}
// DeleteAllSuiteStatusesNotInKeys removes all suite statuses that are not
// within the keys provided and returns the number of suites deleted.
//
// Used to delete suites that have been persisted but are no longer part of
// the configured suites. With an empty key list, everything is deleted.
func (s *Store) DeleteAllSuiteStatusesNotInKeys(keys []string) int {
	logr.Debugf("[sql.DeleteAllSuiteStatusesNotInKeys] Called with %d keys", len(keys))
	if len(keys) == 0 {
		// Delete all suites
		logr.Debugf("[sql.DeleteAllSuiteStatusesNotInKeys] No keys provided, deleting all suites")
		result, err := s.db.Exec("DELETE FROM suites")
		if err != nil {
			logr.Errorf("[sql.DeleteAllSuiteStatusesNotInKeys] Failed to delete all suites: %s", err.Error())
			return 0
		}
		rowsAffected, _ := result.RowsAffected()
		return int(rowsAffected)
	}
	// Build the $1,$2,... placeholder list once; it is shared by both the
	// logging query and the delete query below
	args := make([]interface{}, 0, len(keys))
	var placeholders strings.Builder
	for i := range keys {
		if i > 0 {
			placeholders.WriteString(",")
		}
		placeholders.WriteString(fmt.Sprintf("$%d", i+1))
		args = append(args, keys[i])
	}
	// First, log which suites are about to be deleted (best-effort)
	rows, err := s.db.Query("SELECT suite_key FROM suites WHERE suite_key NOT IN ("+placeholders.String()+")", args...)
	if err == nil {
		var deletedKeys []string
		for rows.Next() {
			var key string
			if err := rows.Scan(&key); err == nil {
				deletedKeys = append(deletedKeys, key)
			}
		}
		// Close explicitly (not deferred) so this connection is released
		// before the DELETE below runs; a deferred Close would hold it for
		// the rest of the function and can starve a capped connection pool
		rows.Close()
		if len(deletedKeys) > 0 {
			logr.Infof("[sql.DeleteAllSuiteStatusesNotInKeys] Deleting suites with keys: %v", deletedKeys)
		}
	}
	result, err := s.db.Exec("DELETE FROM suites WHERE suite_key NOT IN ("+placeholders.String()+")", args...)
	if err != nil {
		logr.Errorf("[sql.DeleteAllSuiteStatusesNotInKeys] Failed to delete suites: %s", err.Error())
		return 0
	}
	rowsAffected, _ := result.RowsAffected()
	return int(rowsAffected)
}
// Suite helper methods
// getSuiteID retrieves the suite ID from the database by its key.
// Returns common.ErrSuiteNotFound when no row matches the suite's key.
func (s *Store) getSuiteID(tx *sql.Tx, su *suite.Suite) (int64, error) {
	var suiteID int64
	scanErr := tx.QueryRow("SELECT suite_id FROM suites WHERE suite_key = $1", su.Key()).Scan(&suiteID)
	switch {
	case scanErr == nil:
		return suiteID, nil
	case errors.Is(scanErr, sql.ErrNoRows):
		// Map the driver-level sentinel to the store's own not-found error
		return 0, common.ErrSuiteNotFound
	default:
		return 0, scanErr
	}
}
// insertSuite inserts a suite in the store and returns the generated id.
func (s *Store) insertSuite(tx *sql.Tx, su *suite.Suite) (int64, error) {
	row := tx.QueryRow(
		"INSERT INTO suites (suite_key, suite_name, suite_group) VALUES ($1, $2, $3) RETURNING suite_id",
		su.Key(),
		su.Name,
		su.Group,
	)
	var suiteID int64
	if scanErr := row.Scan(&suiteID); scanErr != nil {
		return 0, scanErr
	}
	return suiteID, nil
}
// getSuiteResults retrieves paginated suite results for the given suite id.
//
// Results are returned in chronological order (oldest to newest), each with
// its linked endpoint results attached. Scan failures on individual rows are
// logged and skipped; a failure to iterate the main result set is returned.
func (s *Store) getSuiteResults(tx *sql.Tx, suiteID int64, page, pageSize int) ([]*suite.Result, error) {
	rows, err := tx.Query(`
		SELECT suite_result_id, success, errors, duration, timestamp
		FROM suite_results
		WHERE suite_id = $1
		ORDER BY suite_result_id DESC
		LIMIT $2 OFFSET $3
	`,
		suiteID,
		pageSize,
		(page-1)*pageSize,
	)
	if err != nil {
		logr.Errorf("[sql.getSuiteResults] Query failed: %v", err)
		return nil, err
	}
	// Pair each result with its primary key so the endpoint results can be
	// fetched after the main result set has been fully consumed
	type suiteResultData struct {
		result *suite.Result
		id     int64
	}
	var resultsData []suiteResultData
	for rows.Next() {
		result := &suite.Result{
			EndpointResults: []*endpoint.Result{},
		}
		var suiteResultID int64
		var joinedErrors string
		var nanoseconds int64
		err = rows.Scan(&suiteResultID, &result.Success, &joinedErrors, &nanoseconds, &result.Timestamp)
		if err != nil {
			logr.Errorf("[sql.getSuiteResults] Failed to scan suite result: %s", err.Error())
			continue
		}
		result.Duration = time.Duration(nanoseconds)
		if len(joinedErrors) > 0 {
			// Errors are persisted as a single column joined by arraySeparator
			result.Errors = strings.Split(joinedErrors, arraySeparator)
		}
		resultsData = append(resultsData, suiteResultData{
			result: result,
			id:     suiteResultID,
		})
	}
	// Close the result set BEFORE issuing the per-result endpoint queries on
	// the same transaction: some drivers do not support multiple concurrently
	// active result sets on a single connection
	rows.Close()
	// Surface iteration errors that rows.Next() may have swallowed silently
	if err = rows.Err(); err != nil {
		logr.Errorf("[sql.getSuiteResults] Failed to iterate over suite results: %s", err.Error())
		return nil, err
	}
	// Reverse the results to get chronological order (oldest to newest);
	// the query orders DESC so that LIMIT/OFFSET pages from the most recent
	for i := len(resultsData)/2 - 1; i >= 0; i-- {
		opp := len(resultsData) - 1 - i
		resultsData[i], resultsData[opp] = resultsData[opp], resultsData[i]
	}
	// Fetch endpoint results for each suite result
	for _, data := range resultsData {
		result := data.result
		resultID := data.id
		epRows, err := tx.Query(`
			SELECT
				e.endpoint_name,
				er.success,
				er.errors,
				er.duration,
				er.timestamp
			FROM endpoint_results er
			JOIN endpoints e ON er.endpoint_id = e.endpoint_id
			WHERE er.suite_result_id = $1
			ORDER BY er.endpoint_result_id
		`, resultID)
		if err != nil {
			logr.Errorf("[sql.getSuiteResults] Failed to get endpoint results for suite_result_id=%d: %s", resultID, err.Error())
			continue
		}
		epCount := 0
		for epRows.Next() {
			epCount++
			var name string
			var success bool
			var joinedErrors string
			var duration int64
			var timestamp time.Time
			err = epRows.Scan(&name, &success, &joinedErrors, &duration, &timestamp)
			if err != nil {
				logr.Errorf("[sql.getSuiteResults] Failed to scan endpoint result: %s", err.Error())
				continue
			}
			epResult := &endpoint.Result{
				Name:      name,
				Success:   success,
				Duration:  time.Duration(duration),
				Timestamp: timestamp,
			}
			if len(joinedErrors) > 0 {
				epResult.Errors = strings.Split(joinedErrors, arraySeparator)
			}
			result.EndpointResults = append(result.EndpointResults, epResult)
		}
		// Close each per-result cursor immediately rather than deferring, so
		// cursors do not accumulate for the duration of the loop
		epRows.Close()
		if epCount > 0 {
			logr.Debugf("[sql.getSuiteResults] Found %d endpoint results for suite_result_id=%d", epCount, resultID)
		}
	}
	// Extract just the results for return; nil when there are none, matching
	// the callers' expectation for an empty page
	var results []*suite.Result
	for _, data := range resultsData {
		results = append(results, data.result)
	}
	return results, nil
}
// getNumberOfSuiteResultsByID gets the count of results for a suite.
func (s *Store) getNumberOfSuiteResultsByID(tx *sql.Tx, suiteID int64) (int64, error) {
	var numberOfResults int64
	scanErr := tx.QueryRow("SELECT COUNT(1) FROM suite_results WHERE suite_id = $1", suiteID).Scan(&numberOfResults)
	if scanErr != nil {
		return numberOfResults, scanErr
	}
	return numberOfResults, nil
}
// deleteOldSuiteResults deletes old suite results beyond the maximum,
// keeping only the s.maximumNumberOfResults most recent rows for the suite.
func (s *Store) deleteOldSuiteResults(tx *sql.Tx, suiteID int64) error {
	// Keep the newest rows (highest suite_result_id) and drop everything else
	const query = `
		DELETE FROM suite_results
		WHERE suite_id = $1
		AND suite_result_id NOT IN (
			SELECT suite_result_id
			FROM suite_results
			WHERE suite_id = $1
			ORDER BY suite_result_id DESC
			LIMIT $2
		)
	`
	_, execErr := tx.Exec(query, suiteID, s.maximumNumberOfResults)
	return execErr
}

View File

@@ -103,7 +103,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
now := time.Now().Truncate(time.Hour)
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})
store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-5 * time.Hour), Success: true})
tx, _ := store.db.Begin()
oldest, _ := store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -113,7 +113,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}
// The oldest cache entry should remain at ~5 hours old, because this entry is more recent
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})
store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-3 * time.Hour), Success: true})
tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -123,7 +123,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}
// The oldest cache entry should now become at ~8 hours old, because this entry is older
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -133,7 +133,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
}
// Since this is one hour before reaching the clean up threshold, the oldest entry should now be this one
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold - time.Hour)), Success: true})
store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold - time.Hour)), Success: true})
tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -144,7 +144,7 @@ func TestStore_InsertCleansUpOldUptimeEntriesProperly(t *testing.T) {
// Since this entry is after the uptimeAgeCleanUpThreshold, both this entry as well as the previous
// one should be deleted since they both surpass uptimeRetention
store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold + time.Hour)), Success: true})
store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-(uptimeAgeCleanUpThreshold + time.Hour)), Success: true})
tx, _ = store.db.Begin()
oldest, _ = store.getAgeOfOldestEndpointUptimeEntry(tx, 1)
@@ -182,7 +182,7 @@ func TestStore_HourlyUptimeEntriesAreMergedIntoDailyUptimeEntriesProperly(t *tes
for i := scenario.numberOfHours; i > 0; i-- {
//fmt.Printf("i: %d (%s)\n", i, now.Add(-time.Duration(i)*time.Hour))
// Create an uptime entry
err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-time.Duration(i) * time.Hour), Success: true})
err := store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: now.Add(-time.Duration(i) * time.Hour), Success: true})
if err != nil {
t.Log(err)
}
@@ -218,7 +218,7 @@ func TestStore_getEndpointUptime(t *testing.T) {
// Add 768 hourly entries (32 days)
// Daily entries should be merged from hourly entries automatically
for i := 768; i > 0; i-- {
err := store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now().Add(-time.Duration(i) * time.Hour), Duration: time.Second, Success: true})
err := store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: time.Now().Add(-time.Duration(i) * time.Hour), Duration: time.Second, Success: true})
if err != nil {
t.Log(err)
}
@@ -245,7 +245,7 @@ func TestStore_getEndpointUptime(t *testing.T) {
t.Errorf("expected uptime to be 1, got %f", uptime)
}
// Add a new unsuccessful result, which should impact the uptime
err = store.Insert(&testEndpoint, &endpoint.Result{Timestamp: time.Now(), Duration: time.Second, Success: false})
err = store.InsertEndpointResult(&testEndpoint, &endpoint.Result{Timestamp: time.Now(), Duration: time.Second, Success: false})
if err != nil {
t.Log(err)
}
@@ -280,8 +280,8 @@ func TestStore_InsertCleansUpEventsAndResultsProperly(t *testing.T) {
resultsCleanUpThreshold := store.maximumNumberOfResults + resultsAboveMaximumCleanUpThreshold
eventsCleanUpThreshold := store.maximumNumberOfEvents + eventsAboveMaximumCleanUpThreshold
for i := 0; i < resultsCleanUpThreshold+eventsCleanUpThreshold; i++ {
store.Insert(&testEndpoint, &testSuccessfulResult)
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
ss, _ := store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, storage.DefaultMaximumNumberOfResults*5).WithEvents(1, storage.DefaultMaximumNumberOfEvents*5))
if len(ss.Results) > resultsCleanUpThreshold+1 {
t.Errorf("number of results shouldn't have exceeded %d, reached %d", resultsCleanUpThreshold, len(ss.Results))
@@ -296,8 +296,8 @@ func TestStore_InsertWithCaching(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_InsertWithCaching.db", true, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Close()
// Add 2 results
store.Insert(&testEndpoint, &testSuccessfulResult)
store.Insert(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
// Verify that they exist
endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
@@ -307,8 +307,8 @@ func TestStore_InsertWithCaching(t *testing.T) {
t.Fatalf("expected 2 results, got %d", len(endpointStatuses[0].Results))
}
// Add 2 more results
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
// Verify that they exist
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
@@ -329,8 +329,8 @@ func TestStore_InsertWithCaching(t *testing.T) {
func TestStore_Persistence(t *testing.T) {
path := t.TempDir() + "/TestStore_Persistence.db"
store, _ := NewStore("sqlite", path, false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
store.Insert(&testEndpoint, &testSuccessfulResult)
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
if uptime, _ := store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 1h should've been 0.5, got %f", uptime)
}
@@ -425,12 +425,12 @@ func TestStore_Save(t *testing.T) {
func TestStore_SanityCheck(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_SanityCheck.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Close()
store.Insert(&testEndpoint, &testSuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
endpointStatuses, _ := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
t.Fatalf("expected 1 EndpointStatus, got %d", numberOfEndpointStatuses)
}
store.Insert(&testEndpoint, &testUnsuccessfulResult)
store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
// Both results inserted are for the same endpoint, therefore, the count shouldn't have increased
endpointStatuses, _ = store.GetAllEndpointStatuses(paging.NewEndpointStatusParams())
if numberOfEndpointStatuses := len(endpointStatuses); numberOfEndpointStatuses != 1 {
@@ -541,7 +541,7 @@ func TestStore_NoRows(t *testing.T) {
func TestStore_BrokenSchema(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_BrokenSchema.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Close()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err != nil {
@@ -553,7 +553,7 @@ func TestStore_BrokenSchema(t *testing.T) {
// Break
_, _ = store.db.Exec("DROP TABLE endpoints")
// And now we'll try to insert something in our broken schema
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
@@ -576,12 +576,12 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_events")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fails, got", err.Error())
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 1).WithEvents(1, 1)); err != nil {
@@ -592,28 +592,28 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_results")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 1).WithEvents(1, 1)); err != nil {
t.Fatal("expected no error, because this should silently fail, got", err.Error())
if _, err := store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 1).WithEvents(1, 1)); err == nil {
t.Fatal("expected an error")
}
// Repair
if err := store.createSchema(); err != nil {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_result_conditions")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err == nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err == nil {
t.Fatal("expected an error")
}
// Repair
@@ -621,12 +621,12 @@ func TestStore_BrokenSchema(t *testing.T) {
t.Fatal("schema should've been repaired")
}
store.Clear()
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Break
_, _ = store.db.Exec("DROP TABLE endpoint_uptimes")
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, because this should silently fails, got", err.Error())
}
if _, err := store.GetAverageResponseTimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err == nil {
@@ -857,8 +857,8 @@ func TestStore_DeleteAllTriggeredAlertsNotInChecksumsByEndpoint(t *testing.T) {
func TestStore_HasEndpointStatusNewerThan(t *testing.T) {
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_HasEndpointStatusNewerThan.db", false, storage.DefaultMaximumNumberOfResults, storage.DefaultMaximumNumberOfEvents)
defer store.Close()
// Insert an endpoint status
if err := store.Insert(&testEndpoint, &testSuccessfulResult); err != nil {
// InsertEndpointResult an endpoint status
if err := store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult); err != nil {
t.Fatal("expected no error, got", err.Error())
}
// Check if it has a status newer than 1 hour ago

View File

@@ -6,6 +6,7 @@ import (
"github.com/TwiN/gatus/v5/alerting/alert"
"github.com/TwiN/gatus/v5/config/endpoint"
"github.com/TwiN/gatus/v5/config/suite"
"github.com/TwiN/gatus/v5/storage"
"github.com/TwiN/gatus/v5/storage/store/common/paging"
"github.com/TwiN/gatus/v5/storage/store/memory"
@@ -19,12 +20,18 @@ type Store interface {
// with a subset of endpoint.Result defined by the page and pageSize parameters
GetAllEndpointStatuses(params *paging.EndpointStatusParams) ([]*endpoint.Status, error)
// GetAllSuiteStatuses returns all monitored suite statuses
GetAllSuiteStatuses(params *paging.SuiteStatusParams) ([]*suite.Status, error)
// GetEndpointStatus returns the endpoint status for a given endpoint name in the given group
GetEndpointStatus(groupName, endpointName string, params *paging.EndpointStatusParams) (*endpoint.Status, error)
// GetEndpointStatusByKey returns the endpoint status for a given key
GetEndpointStatusByKey(key string, params *paging.EndpointStatusParams) (*endpoint.Status, error)
// GetSuiteStatusByKey returns the suite status for a given key
GetSuiteStatusByKey(key string, params *paging.SuiteStatusParams) (*suite.Status, error)
// GetUptimeByKey returns the uptime percentage during a time range
GetUptimeByKey(key string, from, to time.Time) (float64, error)
@@ -34,14 +41,20 @@ type Store interface {
// GetHourlyAverageResponseTimeByKey returns a map of hourly (key) average response time in milliseconds (value) during a time range
GetHourlyAverageResponseTimeByKey(key string, from, to time.Time) (map[int64]int, error)
// Insert adds the observed result for the specified endpoint into the store
Insert(ep *endpoint.Endpoint, result *endpoint.Result) error
// InsertEndpointResult adds the observed result for the specified endpoint into the store
InsertEndpointResult(ep *endpoint.Endpoint, result *endpoint.Result) error
// InsertSuiteResult adds the observed result for the specified suite into the store
InsertSuiteResult(s *suite.Suite, result *suite.Result) error
// DeleteAllEndpointStatusesNotInKeys removes all Status that are not within the keys provided
//
// Used to delete endpoints that have been persisted but are no longer part of the configured endpoints
DeleteAllEndpointStatusesNotInKeys(keys []string) int
// DeleteAllSuiteStatusesNotInKeys removes all suite statuses that are not within the keys provided
DeleteAllSuiteStatusesNotInKeys(keys []string) int
// GetTriggeredEndpointAlert returns whether the triggered alert for the specified endpoint as well as the necessary information to resolve it
GetTriggeredEndpointAlert(ep *endpoint.Endpoint, alert *alert.Alert) (exists bool, resolveKey string, numberOfSuccessesInARow int, err error)

View File

@@ -56,9 +56,9 @@ func BenchmarkStore_GetAllEndpointStatuses(b *testing.B) {
for i := 0; i < numberOfEndpointsToCreate; i++ {
ep := testEndpoint
ep.Name = "endpoint" + strconv.Itoa(i)
// Insert 20 results for each endpoint
// InsertEndpointResult 20 results for each endpoint
for j := 0; j < 20; j++ {
scenario.Store.Insert(&ep, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&ep, &testSuccessfulResult)
}
}
// Run the scenarios
@@ -131,7 +131,7 @@ func BenchmarkStore_Insert(b *testing.B) {
result = testSuccessfulResult
}
result.Timestamp = time.Now()
scenario.Store.Insert(&testEndpoint, &result)
scenario.Store.InsertEndpointResult(&testEndpoint, &result)
n++
}
})
@@ -144,7 +144,7 @@ func BenchmarkStore_Insert(b *testing.B) {
result = testSuccessfulResult
}
result.Timestamp = time.Now()
scenario.Store.Insert(&testEndpoint, &result)
scenario.Store.InsertEndpointResult(&testEndpoint, &result)
}
}
b.ReportAllocs()
@@ -192,8 +192,8 @@ func BenchmarkStore_GetEndpointStatusByKey(b *testing.B) {
}
for _, scenario := range scenarios {
for i := 0; i < 50; i++ {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&testEndpoint, &testUnsuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
}
b.Run(scenario.Name, func(b *testing.B) {
if scenario.Parallel {

View File

@@ -136,8 +136,8 @@ func TestStore_GetEndpointStatusByKey(t *testing.T) {
thirdResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
endpointStatus, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults))
if err != nil {
t.Fatal("shouldn't have returned an error, got", err.Error())
@@ -157,7 +157,7 @@ func TestStore_GetEndpointStatusByKey(t *testing.T) {
if endpointStatus.Results[0].Timestamp.After(endpointStatus.Results[1].Timestamp) {
t.Error("The result at index 0 should've been older than the result at index 1")
}
scenario.Store.Insert(&testEndpoint, &thirdResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &thirdResult)
endpointStatus, err = scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults))
if err != nil {
t.Fatal("shouldn't have returned an error, got", err.Error())
@@ -175,7 +175,7 @@ func TestStore_GetEndpointStatusForMissingStatusReturnsNil(t *testing.T) {
defer cleanUp(scenarios)
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
endpointStatus, err := scenario.Store.GetEndpointStatus("nonexistantgroup", "nonexistantname", paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults))
if !errors.Is(err, common.ErrEndpointNotFound) {
t.Error("should've returned ErrEndpointNotFound, got", err)
@@ -206,8 +206,8 @@ func TestStore_GetAllEndpointStatuses(t *testing.T) {
defer cleanUp(scenarios)
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&testEndpoint, &testUnsuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testUnsuccessfulResult)
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
@@ -230,10 +230,10 @@ func TestStore_GetAllEndpointStatuses(t *testing.T) {
t.Run(scenario.Name+"-page-2", func(t *testing.T) {
otherEndpoint := testEndpoint
otherEndpoint.Name = testEndpoint.Name + "-other"
scenario.Store.Insert(&testEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
scenario.Store.Insert(&otherEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&otherEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&otherEndpoint, &testSuccessfulResult)
scenario.Store.InsertEndpointResult(&otherEndpoint, &testSuccessfulResult)
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(2, 2))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
@@ -268,8 +268,8 @@ func TestStore_GetAllEndpointStatusesWithResultsAndEvents(t *testing.T) {
secondResult := testUnsuccessfulResult
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
endpointStatuses, err := scenario.Store.GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(1, 20).WithEvents(1, 50))
if err != nil {
@@ -302,8 +302,8 @@ func TestStore_GetEndpointStatusPage1IsHasMoreRecentResultsThanPage2(t *testing.
secondResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
endpointStatusPage1, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithResults(1, 1))
if err != nil {
t.Error("shouldn't have returned an error, got", err.Error())
@@ -345,8 +345,8 @@ func TestStore_GetUptimeByKey(t *testing.T) {
if _, err := scenario.Store.GetUptimeByKey(testEndpoint.Key(), time.Now().Add(-time.Hour), time.Now()); err != common.ErrEndpointNotFound {
t.Errorf("should've returned not found because there's nothing yet, got %v", err)
}
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
if uptime, _ := scenario.Store.GetUptimeByKey(testEndpoint.Key(), now.Add(-time.Hour), time.Now()); uptime != 0.5 {
t.Errorf("the uptime over the past 1h should've been 0.5, got %f", uptime)
}
@@ -380,10 +380,10 @@ func TestStore_GetAverageResponseTimeByKey(t *testing.T) {
fourthResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.Insert(&testEndpoint, &thirdResult)
scenario.Store.Insert(&testEndpoint, &fourthResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &thirdResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &fourthResult)
if averageResponseTime, err := scenario.Store.GetAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-48*time.Hour), now.Add(-24*time.Hour)); err == nil {
if averageResponseTime != 0 {
t.Errorf("expected average response time to be 0ms, got %v", averageResponseTime)
@@ -437,10 +437,10 @@ func TestStore_GetHourlyAverageResponseTimeByKey(t *testing.T) {
fourthResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.Insert(&testEndpoint, &thirdResult)
scenario.Store.Insert(&testEndpoint, &fourthResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &thirdResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &fourthResult)
hourlyAverageResponseTime, err := scenario.Store.GetHourlyAverageResponseTimeByKey(testEndpoint.Key(), now.Add(-24*time.Hour), now)
if err != nil {
t.Error("shouldn't have returned an error, got", err)
@@ -468,8 +468,8 @@ func TestStore_Insert(t *testing.T) {
secondResult.Timestamp = now
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&testEndpoint, &firstResult)
scenario.Store.Insert(&testEndpoint, &secondResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &firstResult)
scenario.Store.InsertEndpointResult(&testEndpoint, &secondResult)
ss, err := scenario.Store.GetEndpointStatusByKey(testEndpoint.Key(), paging.NewEndpointStatusParams().WithEvents(1, storage.DefaultMaximumNumberOfEvents).WithResults(1, storage.DefaultMaximumNumberOfResults))
if err != nil {
t.Error("shouldn't have returned an error, got", err)
@@ -545,8 +545,8 @@ func TestStore_DeleteAllEndpointStatusesNotInKeys(t *testing.T) {
r := &testSuccessfulResult
for _, scenario := range scenarios {
t.Run(scenario.Name, func(t *testing.T) {
scenario.Store.Insert(&firstEndpoint, r)
scenario.Store.Insert(&secondEndpoint, r)
scenario.Store.InsertEndpointResult(&firstEndpoint, r)
scenario.Store.InsertEndpointResult(&secondEndpoint, r)
if ss, _ := scenario.Store.GetEndpointStatusByKey(firstEndpoint.Key(), paging.NewEndpointStatusParams()); ss == nil {
t.Fatal("firstEndpoint should exist, got", ss)
}