Prevent rate limiting by StatusCake

To prevent rate limiting by StatusCake, two changes have been introduced. First, the results of the GetAll function are cached, so that the status monitors do not have to be retrieved from the API on every call.
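
As a rough sketch of that caching pattern (not the actual implementation: the Monitor type, monitorService and fetchAll below are stand-ins invented for the example; only the package-level cachedMonitors variable and the GetAll behaviour mirror the diff further down):

```go
package main

import "fmt"

// Monitor stands in for models.Monitor in the real code.
type Monitor struct{ ID, Name string }

type monitorService struct{}

// Package-level cache, mirroring the cachedMonitors variable added in this commit.
var cachedMonitors []Monitor

// fetchAll is a stand-in for the paginated StatusCake API calls.
func (s *monitorService) fetchAll() []Monitor {
	fmt.Println("hitting the API")
	return []Monitor{{ID: "1", Name: "example-monitor"}}
}

// GetAll serves from the cache when it is already populated; otherwise it
// fetches the monitors once and fills the cache, so repeated calls skip the API.
func (s *monitorService) GetAll() []Monitor {
	if len(cachedMonitors) > 0 {
		return cachedMonitors
	}
	cachedMonitors = s.fetchAll()
	return cachedMonitors
}

func main() {
	s := &monitorService{}
	s.GetAll() // prints "hitting the API"
	s.GetAll() // served from the cache, no API call
}
```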

Second, I added a one-second pause after each write operation (Add, Update and Remove). Even when the results are cached, adding several monitors in a row invalidates the cache each time, so those calls would still hit the rate limit.
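
The write path can be sketched in the same simplified way; the HTTP request to StatusCake is elided here, and only the cache reset and the one-second time.Sleep correspond to the diff below:

```go
package main

import (
	"fmt"
	"time"
)

// Monitor stands in for models.Monitor in the real code.
type Monitor struct{ ID, Name string }

type monitorService struct{}

// Package-level cache shared with GetAll (see the previous sketch).
var cachedMonitors []Monitor

// Add creates a monitor: it clears the cache, since the cached list is now
// stale, and pauses for one second so that adding several monitors in a row
// stays under the StatusCake rate limit. Update and Remove behave the same way.
func (s *monitorService) Add(m Monitor) {
	cachedMonitors = []Monitor{}
	// ... in the real code the monitor is POSTed to the StatusCake API here ...
	fmt.Println("added", m.Name)
	time.Sleep(1 * time.Second)
}

func main() {
	s := &monitorService{}
	for _, name := range []string{"a", "b", "c"} {
		s.Add(Monitor{Name: name}) // each call waits one second before returning
	}
}
```

Clearing the whole slice rather than patching it keeps the logic simple: the next GetAll call just repopulates the cache from the API.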
robinderooij-rl committed Nov 19, 2024
1 parent 7e597e6 commit c8e5a2a
Showing 1 changed file with 17 additions and 1 deletion.
18 changes: 17 additions & 1 deletion pkg/monitors/statuscake/statuscake-monitor.go
@@ -11,6 +11,7 @@ import (
"os"
"strconv"
"strings"
"time"

logf "sigs.k8s.io/controller-runtime/pkg/log"

@@ -23,6 +24,8 @@ import (
)

var log = logf.Log.WithName("statuscake-monitor")
var cachedMonitors []models.Monitor


// StatusCakeMonitorService is the service structure for StatusCake
type StatusCakeMonitorService struct {
@@ -328,6 +331,9 @@ func (service *StatusCakeMonitorService) GetByID(id string) (*models.Monitor, er

// GetAll function will fetch all monitors
func (service *StatusCakeMonitorService) GetAll() []models.Monitor {
if len(cachedMonitors) > 0 {
return cachedMonitors
}
var StatusCakeMonitorData []StatusCakeMonitorData
page := 1
for {
@@ -342,7 +348,8 @@ func (service *StatusCakeMonitorService) GetAll() []models.Monitor {
}
page += 1
}
return StatusCakeMonitorMonitorsToBaseMonitorsMapper(StatusCakeMonitorData)
cachedMonitors = StatusCakeMonitorMonitorsToBaseMonitorsMapper(StatusCakeMonitorData)
return cachedMonitors
}

func (service *StatusCakeMonitorService) fetchMonitors(page int) *StatusCakeMonitor {
@@ -391,6 +398,7 @@ func (service *StatusCakeMonitorService) fetchMonitors(page int) *StatusCakeMoni

// Add will create a new Monitor
func (service *StatusCakeMonitorService) Add(m models.Monitor) {
cachedMonitors = []models.Monitor{}
u, err := url.Parse(service.url)
if err != nil {
log.Error(err, "Unable to Parse monitor URL")
@@ -421,10 +429,13 @@ func (service *StatusCakeMonitorService) Add(m models.Monitor) {
log.Error(nil, "Insert Request failed for name: "+m.Name+" with status code "+strconv.Itoa(resp.StatusCode))
log.Error(nil, string(bodyBytes))
}
// Take 1 second pause to prevent being rate limited
time.Sleep(1 * time.Second)
}

// Update will update an existing Monitor
func (service *StatusCakeMonitorService) Update(m models.Monitor) {
cachedMonitors = []models.Monitor{}
u, err := url.Parse(service.url)
if err != nil {
log.Error(err, "Unable to Parse monitor URL")
@@ -455,10 +466,13 @@ func (service *StatusCakeMonitorService) Update(m models.Monitor) {
log.Error(nil, "Update Request failed for name: "+m.Name+" with status code "+strconv.Itoa(resp.StatusCode))
log.Error(nil, string(bodyBytes))
}
// Take 1 second pause to prevent being rate limited
time.Sleep(1 * time.Second)
}

// Remove will delete an existing Monitor
func (service *StatusCakeMonitorService) Remove(m models.Monitor) {
cachedMonitors = []models.Monitor{}
u, err := url.Parse(service.url)
if err != nil {
log.Error(err, "Unable to Parse monitor URL")
@@ -489,4 +503,6 @@ func (service *StatusCakeMonitorService) Remove(m models.Monitor) {
log.Error(nil, fmt.Sprintf("Delete Request failed for Monitor: %s with id: %s", m.Name, m.ID))
}
}
// Take 1 second pause to prevent being rate limited
time.Sleep(1 * time.Second)
}
