Skip to content
This repository has been archived by the owner on Oct 19, 2024. It is now read-only.

Commit

Permalink
update ratelimiter (#897)
Browse files Browse the repository at this point in the history
* update ratelimiter

* Switch to old scheduler
  • Loading branch information
Geometrically authored Mar 27, 2024
1 parent a0aa350 commit 0a0837e
Show file tree
Hide file tree
Showing 12 changed files with 287 additions and 602 deletions.
119 changes: 73 additions & 46 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,20 +11,21 @@ name = "labrinth"
path = "src/main.rs"

[dependencies]
actix = "0.13.1"
actix-web = "4.4.1"
actix-rt = "2.9.0"
actix-multipart = "0.6.1"
actix-cors = "0.7.0"
actix-ws = "0.2.5"
actix-files = "0.6.5"
actix-web-prom = "0.7.0"
governor = "0.6.3"

tokio = { version = "1.35.1", features = ["sync"] }
tokio-stream = "0.1.14"

futures = "0.3.30"
futures-timer = "3.0.2"
futures-util = "0.3.30"
async-trait = "0.1.70"
dashmap = "5.4.0"
lazy_static = "1.4.0"
Expand Down
30 changes: 27 additions & 3 deletions src/lib.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,20 @@
use std::num::NonZeroU32;
use std::sync::Arc;
use std::time::Duration;

use actix_web::web;
use database::redis::RedisPool;
use log::{info, warn};
use queue::{
analytics::AnalyticsQueue, payouts::PayoutsQueue, session::AuthQueue, socket::ActiveSockets,
};
use scheduler::Scheduler;
use sqlx::Postgres;
use tokio::sync::RwLock;

extern crate clickhouse as clickhouse_crate;
use clickhouse_crate::Client;
use governor::{Quota, RateLimiter};
use governor::middleware::StateInformationMiddleware;
use util::cors::default_cors;

use crate::queue::moderation::AutomatedModerationQueue;
Expand All @@ -20,14 +23,14 @@ use crate::{
search::indexing::index_projects,
util::env::{parse_strings_from_var, parse_var},
};
use crate::util::ratelimit::KeyedRateLimiter;

pub mod auth;
pub mod clickhouse;
pub mod database;
pub mod file_hosting;
pub mod models;
pub mod queue;
pub mod ratelimit;
pub mod routes;
pub mod scheduler;
pub mod search;
Expand All @@ -46,14 +49,15 @@ pub struct LabrinthConfig {
pub clickhouse: Client,
pub file_host: Arc<dyn file_hosting::FileHost + Send + Sync>,
pub maxmind: Arc<queue::maxmind::MaxMindIndexer>,
pub scheduler: Arc<Scheduler>,
pub scheduler: Arc<scheduler::Scheduler>,
pub ip_salt: Pepper,
pub search_config: search::SearchConfig,
pub session_queue: web::Data<AuthQueue>,
pub payouts_queue: web::Data<PayoutsQueue>,
pub analytics_queue: Arc<AnalyticsQueue>,
pub active_sockets: web::Data<RwLock<ActiveSockets>>,
pub automated_moderation_queue: web::Data<AutomatedModerationQueue>,
pub rate_limiter: KeyedRateLimiter,
}

pub fn app_setup(
Expand Down Expand Up @@ -82,6 +86,25 @@ pub fn app_setup(

let mut scheduler = scheduler::Scheduler::new();

let limiter: KeyedRateLimiter = Arc::new(
RateLimiter::keyed(Quota::per_minute(NonZeroU32::new(300).unwrap()))
.with_middleware::<StateInformationMiddleware>(),
);
let limiter_clone = Arc::clone(&limiter);
scheduler.run(Duration::from_secs(60), move || {
info!(
"Clearing ratelimiter, storage size: {}",
limiter_clone.len()
);
limiter_clone.retain_recent();
info!(
"Done clearing ratelimiter, storage size: {}",
limiter_clone.len()
);

async move {}
});

// The interval in seconds at which the local database is indexed
// for searching. Defaults to 1 hour if unset.
let local_index_interval =
Expand Down Expand Up @@ -255,6 +278,7 @@ pub fn app_setup(
analytics_queue,
active_sockets,
automated_moderation_queue,
rate_limiter: limiter,
}
}

Expand Down
38 changes: 9 additions & 29 deletions src/main.rs
Original file line number Diff line number Diff line change
@@ -1,16 +1,17 @@
use actix_web::{App, HttpServer};
use actix_web_prom::PrometheusMetricsBuilder;
use env_logger::Env;
use governor::middleware::StateInformationMiddleware;

Check warning on line 4 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

unused import: `governor::middleware::StateInformationMiddleware`

warning: unused import: `governor::middleware::StateInformationMiddleware`
 --> src/main.rs:4:5
  |
4 | use governor::middleware::StateInformationMiddleware;
  |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  |
  = note: `#[warn(unused_imports)]` on by default
use governor::{Quota, RateLimiter};

Check warning on line 5 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

unused imports: `Quota`, `RateLimiter`

warning: unused imports: `Quota`, `RateLimiter`
 --> src/main.rs:5:16
  |
5 | use governor::{Quota, RateLimiter};
  |                ^^^^^  ^^^^^^^^^^^
use labrinth::database::redis::RedisPool;
use labrinth::file_hosting::S3Host;
use labrinth::ratelimit::errors::ARError;
use labrinth::ratelimit::memory::{MemoryStore, MemoryStoreActor};
use labrinth::ratelimit::middleware::RateLimiter;
use labrinth::search;
use labrinth::util::env::parse_var;
use labrinth::util::ratelimit::{KeyedRateLimiter, RateLimit};

Check warning on line 9 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

unused import: `KeyedRateLimiter`

warning: unused import: `KeyedRateLimiter`
 --> src/main.rs:9:33
  |
9 | use labrinth::util::ratelimit::{KeyedRateLimiter, RateLimit};
  |                                 ^^^^^^^^^^^^^^^^
use labrinth::{check_env_vars, clickhouse, database, file_hosting, queue};
use log::{error, info};
use std::num::NonZeroU32;

Check warning on line 12 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

unused import: `std::num::NonZeroU32`

warning: unused import: `std::num::NonZeroU32`
  --> src/main.rs:12:5
   |
12 | use std::num::NonZeroU32;
   |     ^^^^^^^^^^^^^^^^^^^^
use std::sync::Arc;
use std::time::Duration;

Check warning on line 14 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

unused import: `std::time::Duration`

warning: unused import: `std::time::Duration`
  --> src/main.rs:14:5
   |
14 | use std::time::Duration;
   |     ^^^^^^^^^^^^^^^^^^^

#[cfg(feature = "jemalloc")]
#[global_allocator]
Expand Down Expand Up @@ -90,17 +91,14 @@ async fn main() -> std::io::Result<()> {

let maxmind_reader = Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());

let store = MemoryStore::new();

let prometheus = PrometheusMetricsBuilder::new("labrinth")
.endpoint("/metrics")
.build()
.expect("Failed to create prometheus metrics middleware");

let search_config = search::SearchConfig::new(None);
info!("Starting Actix HTTP server!");

let labrinth_config = labrinth::app_setup(
let mut labrinth_config = labrinth::app_setup(

Check warning on line 101 in src/main.rs

View workflow job for this annotation

GitHub Actions / clippy

variable does not need to be mutable

warning: variable does not need to be mutable
   --> src/main.rs:101:9
    |
101 |     let mut labrinth_config = labrinth::app_setup(
    |         ----^^^^^^^^^^^^^^^
    |         |
    |         help: remove this `mut`
    |
    = note: `#[warn(unused_mut)]` on by default
pool.clone(),
redis_pool.clone(),
search_config.clone(),
Expand All @@ -109,32 +107,14 @@ async fn main() -> std::io::Result<()> {
maxmind_reader.clone(),
);

info!("Starting Actix HTTP server!");

// Init App
HttpServer::new(move || {
App::new()
.wrap(prometheus.clone())
.wrap(RateLimit(Arc::clone(&labrinth_config.rate_limiter)))
.wrap(actix_web::middleware::Compress::default())
.wrap(
RateLimiter::new(MemoryStoreActor::from(store.clone()).start())
.with_identifier(|req| {
let connection_info = req.connection_info();
let ip =
String::from(if parse_var("CLOUDFLARE_INTEGRATION").unwrap_or(false) {
if let Some(header) = req.headers().get("CF-Connecting-IP") {
header.to_str().map_err(|_| ARError::Identification)?
} else {
connection_info.peer_addr().ok_or(ARError::Identification)?
}
} else {
connection_info.peer_addr().ok_or(ARError::Identification)?
});

Ok(ip)
})
.with_interval(std::time::Duration::from_secs(60))
.with_max_requests(300)
.with_ignore_key(dotenvy::var("RATE_LIMIT_IGNORE_KEY").ok()),
)
.wrap(sentry_actix::Sentry::new())
.configure(|cfg| labrinth::app_config(cfg, labrinth_config.clone()))
})
Expand Down
Loading

0 comments on commit 0a0837e

Please sign in to comment.