Commit 04892ce

sentry - db - fix postgres test DB pool & test
1 parent 9a7454a commit 04892ce

3 files changed: +125 −42 lines changed

sentry/src/db.rs

Lines changed: 110 additions & 30 deletions
@@ -141,11 +141,8 @@ pub async fn setup_migrations(environment: &str) {
 #[cfg(test)]
 pub mod tests_postgres {
     use std::{
-        ops::Deref,
-        sync::{
-            atomic::{AtomicUsize, Ordering},
-            Arc,
-        },
+        ops::{Deref, DerefMut},
+        sync::atomic::{AtomicUsize, Ordering},
     };

     use deadpool::managed::{Manager as ManagerTrait, RecycleResult};
@@ -159,45 +156,64 @@ pub mod tests_postgres {

     pub type Pool = deadpool::managed::Pool<Manager>;

-    pub static DATABASE_POOL: Lazy<Pool> = Lazy::new(|| {
+    pub static DATABASE_POOL: Lazy<Pool> = Lazy::new(|| create_pool("test"));
+
+    /// we must have a duplication of the migration because of how migrant is handling migrations
+    /// we need to separately setup test migrations
+    pub static MIGRATIONS: &[&str] = &["20190806011140_initial-tables"];
+
+    fn create_pool(db_prefix: &str) -> Pool {
         let manager_config = ManagerConfig {
             recycling_method: deadpool_postgres::RecyclingMethod::Fast,
         };
-        let manager = Manager::new(POSTGRES_CONFIG.clone(), manager_config);
+        let manager = Manager::new(POSTGRES_CONFIG.clone(), manager_config, db_prefix);

         Pool::new(manager, 15)
-    });
-
-    /// we must have a duplication of the migration because of how migrant is handling migrations
-    /// we need to separately setup test migrations
-    pub static MIGRATIONS: &[&str] = &["20190806011140_initial-tables"];
+    }

     /// A Database is used to isolate test runs from each other
     /// we need to know the name of the database we've created.
     /// This will allow us the drop the database when we are recycling the connection
     pub struct Database {
-        inner: Arc<DatabaseInner>,
+        /// The database name that will be created by the pool `CREATE DATABASE`
+        /// This database will be set on configuration level of the underlying connection Pool for tests
+        pub name: String,
+        pub pool: deadpool_postgres::Pool<NoTls>,
     }
+
     impl Database {
         pub fn new(name: String, pool: DbPool) -> Self {
-            Self {
-                inner: Arc::new(DatabaseInner { name, pool }),
-            }
+            Self { name, pool }
         }
-    }

-    struct DatabaseInner {
-        /// The database name that will be created by the pool `CREATE DATABASE`
-        /// This database will be set on configuration level of the underlying connection Pool for tests
-        pub name: String,
-        pub pool: deadpool_postgres::Pool<NoTls>,
+        pub fn get_pool(&self) -> deadpool_postgres::Pool<NoTls> {
+            self.pool.clone()
+        }
     }

     impl Deref for Database {
         type Target = deadpool_postgres::Pool<NoTls>;

         fn deref(&self) -> &deadpool_postgres::Pool<NoTls> {
-            &self.inner.pool
+            &self.pool
+        }
+    }
+
+    impl DerefMut for Database {
+        fn deref_mut(&mut self) -> &mut Self::Target {
+            &mut self.pool
+        }
+    }
+
+    impl AsRef<deadpool_postgres::Pool<NoTls>> for Database {
+        fn as_ref(&self) -> &deadpool_postgres::Pool<NoTls> {
+            &self.pool
+        }
+    }
+
+    impl AsMut<deadpool_postgres::Pool<NoTls>> for Database {
+        fn as_mut(&mut self) -> &mut deadpool_postgres::Pool<NoTls> {
+            &mut self.pool
         }
     }

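The `Database` handle handed out by `DATABASE_POOL.get()` now exposes the inner `deadpool_postgres::Pool<NoTls>` both by value (`get_pool()`, a clone) and by reference (`Deref`/`DerefMut`/`AsRef`/`AsMut`). A minimal sketch of how a test could use both, reusing the imports seen elsewhere in this commit; the test itself is hypothetical and not part of the change:

```rust
// Hypothetical test, not part of this commit: it only illustrates the two ways
// of reaching the inner pool that the hunk above introduces.
use crate::db::tests_postgres::{setup_test_migrations, DATABASE_POOL};

#[tokio::test]
async fn uses_an_isolated_database() {
    // Each `get()` yields a `Database` wrapper backed by its own `{db_prefix}_{index}` database.
    let database = DATABASE_POOL.get().await.expect("Should get a Database");

    // `get_pool()` clones the inner pool for helpers that take it by value.
    setup_test_migrations(database.get_pool())
        .await
        .expect("Migrations should succeed");

    // `Deref` forwards to the inner pool, so the wrapper can be used as one directly.
    let client = database.get().await.expect("Should get a client");
    client
        .simple_query("SELECT current_database()")
        .await
        .expect("Should be connected to this test's own database");
}
```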
@@ -208,10 +224,15 @@ pub mod tests_postgres {
         base_pool: deadpool_postgres::Pool<NoTls>,
         manager_config: ManagerConfig,
         index: AtomicUsize,
+        db_prefix: String,
     }

     impl Manager {
-        pub fn new(base_config: tokio_postgres::Config, manager_config: ManagerConfig) -> Self {
+        pub fn new(
+            base_config: tokio_postgres::Config,
+            manager_config: ManagerConfig,
+            db_prefix: &str,
+        ) -> Self {
             // We need to create the schema with a temporary connection, in order to use it for the real Test Pool
             let base_manager = deadpool_postgres::Manager::from_config(
                 base_config.clone(),
@@ -220,19 +241,21 @@ pub mod tests_postgres {
             );
             let base_pool = deadpool_postgres::Pool::new(base_manager, 15);

-            Self::new_with_pool(base_pool, base_config, manager_config)
+            Self::new_with_pool(base_pool, base_config, manager_config, db_prefix)
         }

         pub fn new_with_pool(
             base_pool: deadpool_postgres::Pool<NoTls>,
             base_config: tokio_postgres::Config,
             manager_config: ManagerConfig,
+            db_prefix: &str,
         ) -> Self {
             Self {
                 base_config,
                 base_pool,
                 manager_config,
                 index: AtomicUsize::new(0),
+                db_prefix: db_prefix.into(),
             }
         }
     }
@@ -245,7 +268,9 @@ pub mod tests_postgres {

         async fn create(&self) -> Result<Self::Type, Self::Error> {
             let pool_index = self.index.fetch_add(1, Ordering::SeqCst);
-            let db_name = format!("test_{}", pool_index);
+
+            // e.g. test_0, test_1, test_2
+            let db_name = format!("{}_{}", self.db_prefix, pool_index);

             // 1. Drop the database if it exists - if a test failed before, the database wouldn't have been removed
             // 2. Create database
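Since the database name is now derived from the manager's `db_prefix` plus a monotonically increasing `AtomicUsize`, two pools created with different prefixes can never hand out colliding names. A small illustrative sketch of just that naming logic (not code from this commit):

```rust
#[test]
fn prefix_and_index_build_unique_db_names() {
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Mirrors what `create()` above does with `self.db_prefix` and `self.index`.
    let index = AtomicUsize::new(0);
    let db_prefix = "testing_pool"; // the prefix the new pool test below uses

    let first = format!("{}_{}", db_prefix, index.fetch_add(1, Ordering::SeqCst));
    let second = format!("{}_{}", db_prefix, index.fetch_add(1, Ordering::SeqCst));

    assert_eq!("testing_pool_0", first);
    assert_eq!("testing_pool_1", second);
}
```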
@@ -279,16 +304,18 @@ pub mod tests_postgres {
         }

         async fn recycle(&self, database: &mut Database) -> RecycleResult<Self::Error> {
-            let queries = format!("DROP DATABASE {0} WITH (FORCE);", database.inner.name);
-            let result = self
-                .base_pool
+            // DROP the public schema and create it again for usage after recycling
+            let queries = format!("DROP SCHEMA public CASCADE; CREATE SCHEMA public;");
+            let result = database
+                .pool
                 .get()
                 .await?
                 .simple_query(&queries)
                 .await
                 .map_err(|err| PoolError::Backend(err))?;
-            assert_eq!(1, result.len());
+            assert_eq!(2, result.len());
             assert!(matches!(result[0], SimpleQueryMessage::CommandComplete(..)));
+            assert!(matches!(result[1], SimpleQueryMessage::CommandComplete(..)));

             Ok(())
         }
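Recycling no longer drops the whole database; it resets the `public` schema of the connection's own database instead, so the `CREATE DATABASE` cost is paid only once per pooled object. `simple_query` in tokio_postgres runs each semicolon-separated statement and yields one `SimpleQueryMessage::CommandComplete` per DDL statement, which is why the hunk now asserts two results. A standalone sketch of the same check, assuming the `deadpool_postgres`/`tokio_postgres` types already used in this module:

```rust
use tokio_postgres::{NoTls, SimpleQueryMessage};

// Illustrative helper, not part of the commit: reset the `public` schema of
// whatever database the given pool is configured for.
async fn reset_public_schema(
    pool: &deadpool_postgres::Pool<NoTls>,
) -> Result<(), deadpool_postgres::PoolError> {
    let client = pool.get().await?;
    let messages = client
        .simple_query("DROP SCHEMA public CASCADE; CREATE SCHEMA public;")
        .await
        .map_err(deadpool_postgres::PoolError::Backend)?;

    // One `CommandComplete` for the DROP, one for the CREATE.
    assert_eq!(2, messages.len());
    assert!(messages
        .iter()
        .all(|msg| matches!(msg, SimpleQueryMessage::CommandComplete(..))));

    Ok(())
}
```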
@@ -318,6 +345,59 @@ pub mod tests_postgres {

         Ok(client.batch_execute(&full_query).await?)
     }
+
+    #[cfg(test)]
+    mod test {
+        use super::*;
+
+        #[tokio::test]
+        /// Does not use the `DATABASE_POOL` as other tests can interfere with the pool objects!
+        async fn test_postgres_pool() {
+            let pool = create_pool("testing_pool");
+
+            let database_1 = pool.get().await.expect("Should get");
+            let status = pool.status();
+            assert_eq!(status.size, 1);
+            assert_eq!(status.available, 0);
+
+            let database_2 = pool.get().await.expect("Should get");
+            let status = pool.status();
+            assert_eq!(status.size, 2);
+            assert_eq!(status.available, 0);
+
+            drop(database_1);
+            let status = pool.status();
+            assert_eq!(status.size, 2);
+            assert_eq!(status.available, 1);
+
+            drop(database_2);
+            let status = pool.status();
+            assert_eq!(status.size, 2);
+            assert_eq!(status.available, 2);
+
+            let database_3 = pool.get().await.expect("Should get");
+            let status = pool.status();
+            assert_eq!(status.size, 2);
+            assert_eq!(status.available, 1);
+
+            let database_4 = pool.get().await.expect("Should get");
+            let status = pool.status();
+            assert_eq!(status.size, 2);
+            assert_eq!(status.available, 0);
+
+            let database_5 = pool.get().await.expect("Should get");
+            let status = pool.status();
+            assert_eq!(status.size, 3);
+            assert_eq!(status.available, 0);
+
+            drop(database_3);
+            drop(database_4);
+            drop(database_5);
+            let status = pool.status();
+            assert_eq!(status.size, 3);
+            assert_eq!(status.available, 3);
+        }
+    }
 }

 #[cfg(test)]

sentry/src/db/campaign.rs

Lines changed: 5 additions & 5 deletions
@@ -51,28 +51,28 @@ pub async fn fetch_campaign(pool: DbPool, campaign: &Campaign) -> Result<Campaig

 #[cfg(test)]
 mod test {
-    use primitives::{campaign::Campaign, util::tests::prep_db::DUMMY_CAMPAIGN};
+    use primitives::util::tests::prep_db::DUMMY_CAMPAIGN;

     use crate::db::tests_postgres::{setup_test_migrations, DATABASE_POOL};

     use super::*;

     #[tokio::test]
     async fn it_inserts_and_fetches_campaign() {
-        let db_pool = DATABASE_POOL.get().await.expect("Should get a DB pool");
+        let database = DATABASE_POOL.get().await.expect("Should get a DB pool");

-        setup_test_migrations(db_pool.clone())
+        setup_test_migrations(database.get_pool())
             .await
             .expect("Migrations should succeed");

         let campaign_for_testing = DUMMY_CAMPAIGN.clone();
-        let is_inserted = insert_campaign(&db_pool.clone(), &campaign_for_testing)
+        let is_inserted = insert_campaign(&database.get_pool(), &campaign_for_testing)
             .await
             .expect("Should succeed");

         assert!(is_inserted);

-        let fetched_campaign: Campaign = fetch_campaign(db_pool.clone(), &campaign_for_testing)
+        let fetched_campaign = fetch_campaign(database.get_pool(), &campaign_for_testing)
             .await
             .expect("Should fetch successfully");

sentry/src/db/spendable.rs

Lines changed: 10 additions & 7 deletions
@@ -60,9 +60,9 @@ mod test {

     #[tokio::test]
     async fn it_inserts_and_fetches_spendable() {
-        let db_pool = DATABASE_POOL.get().await.expect("Should get a DB pool");
+        let database = DATABASE_POOL.get().await.expect("Should get a DB pool");

-        setup_test_migrations(db_pool.clone())
+        setup_test_migrations(database.get_pool())
             .await
             .expect("Migrations should succeed");

@@ -74,16 +74,19 @@ mod test {
                 still_on_create2: UnifiedNum::from(500_000),
             },
         };
-        let is_inserted = insert_spendable(db_pool.clone(), &spendable)
+        let is_inserted = insert_spendable(database.get_pool(), &spendable)
             .await
             .expect("Should succeed");

         assert!(is_inserted);

-        let fetched_spendable =
-            fetch_spendable(db_pool.clone(), &spendable.spender, &spendable.channel.id())
-                .await
-                .expect("Should fetch successfully");
+        let fetched_spendable = fetch_spendable(
+            database.get_pool(),
+            &spendable.spender,
+            &spendable.channel.id(),
+        )
+        .await
+        .expect("Should fetch successfully");

         assert_eq!(spendable, fetched_spendable);
     }

0 commit comments
