
Commit 78608a1

sending queue: do a few renamings after the live review
Thanks @Hywan for the review comments!
1 parent: 75aba1d

File tree: 2 files changed (+58, -44 lines)


crates/matrix-sdk/src/client/mod.rs

Lines changed: 6 additions & 4 deletions
@@ -284,8 +284,10 @@ pub(crate) struct ClientInner {
     #[cfg(feature = "e2e-encryption")]
     pub(crate) verification_state: SharedObservable<VerificationState>,
 
-    /// Data related to the sending queue.
-    pub(crate) sending_queue: Arc<SendingQueueData>,
+    /// Data related to the [`SendingQueue`].
+    ///
+    /// [`SendingQueue`]: crate::send_queue::SendingQueue
+    pub(crate) sending_queue_data: Arc<SendingQueueData>,
 }
 
 impl ClientInner {
@@ -328,7 +330,7 @@ impl ClientInner {
             respect_login_well_known,
             sync_beat: event_listener::Event::new(),
             event_cache,
-            sending_queue,
+            sending_queue_data: sending_queue,
             #[cfg(feature = "e2e-encryption")]
             e2ee: EncryptionData::new(encryption_settings),
             #[cfg(feature = "e2e-encryption")]
@@ -2108,7 +2110,7 @@ impl Client {
             self.inner.unstable_features.get().cloned(),
             self.inner.respect_login_well_known,
             self.inner.event_cache.clone(),
-            self.inner.sending_queue.clone(),
+            self.inner.sending_queue_data.clone(),
             #[cfg(feature = "e2e-encryption")]
             self.inner.e2ee.encryption_settings,
         )

crates/matrix-sdk/src/send_queue.rs

Lines changed: 52 additions & 40 deletions
@@ -52,7 +52,7 @@ impl SendingQueue {
     }
 
     fn for_room(&self, room: Room) -> RoomSendingQueue {
-        let data = &self.client.inner.sending_queue;
+        let data = &self.client.inner.sending_queue_data;
 
         let mut map = data.rooms.write().unwrap();
 
@@ -63,31 +63,31 @@ impl SendingQueue {
 
         let owned_room_id = room_id.to_owned();
         let room_q = RoomSendingQueue::new(
-            data.enabled.clone(),
-            data.shutting_down.clone(),
+            data.globally_enabled.clone(),
+            data.is_dropping.clone(),
             &self.client,
             owned_room_id.clone(),
         );
         map.insert(owned_room_id, room_q.clone());
         room_q
     }
 
-    /// Enables the sending queue for the entire client, i.e. all rooms.
+    /// Enable the sending queue for the entire client, i.e. all rooms.
     ///
-    /// This may wake up backgrounds tasks and resume sending of events in the
+    /// This may wake up background tasks and resume sending of events in the
     /// background.
     pub fn enable(&self) {
-        if self.client.inner.sending_queue.enabled.set_if_not_eq(true).is_some() {
+        if self.client.inner.sending_queue_data.globally_enabled.set_if_not_eq(true).is_some() {
             debug!("globally enabling sending queue");
-            let rooms = self.client.inner.sending_queue.rooms.read().unwrap();
+            let rooms = self.client.inner.sending_queue_data.rooms.read().unwrap();
             // Wake up the rooms, in case events have been queued in the meanwhile.
             for room in rooms.values() {
                 room.inner.notifier.notify_one();
             }
         }
     }
 
-    /// Disables the sending queue for the entire client, i.e. all rooms.
+    /// Disable the sending queue for the entire client, i.e. all rooms.
     ///
     /// If requests were being sent, they're not aborted, and will continue
     /// until a status resolves (error responses will keep the events in the
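
Worth spelling out why enable() checks `set_if_not_eq(true).is_some()`: `SharedObservable::set_if_not_eq` only writes, and only returns the previous value, when the new value actually differs, so the per-room tasks are woken up on a real disabled-to-enabled transition and not on redundant enable() calls. A minimal sketch of that behavior, assuming the eyeball crate that provides `SharedObservable`:

    use eyeball::SharedObservable;

    fn main() {
        let globally_enabled = SharedObservable::new(false);

        // The value flips from false to true: the previous value is returned,
        // which is the signal to wake up the per-room sending tasks.
        assert_eq!(globally_enabled.set_if_not_eq(true), Some(false));

        // Already true: nothing is written and `None` is returned, so the
        // caller can skip the wake-up entirely.
        assert_eq!(globally_enabled.set_if_not_eq(true), None);
    }
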
@@ -101,19 +101,19 @@ impl SendingQueue {
         // - or they were not, and it's not worth it waking them to let them they're
         //   disabled, which causes them to go to sleep again.
         debug!("globally disabling sending queue");
-        self.client.inner.sending_queue.enabled.set(false);
+        self.client.inner.sending_queue_data.globally_enabled.set(false);
     }
 
     /// Returns whether the sending queue is enabled, at a client-wide
     /// granularity.
     pub fn is_enabled(&self) -> bool {
-        self.client.inner.sending_queue.enabled.get()
+        self.client.inner.sending_queue_data.globally_enabled.get()
     }
 
     /// A subscriber to the enablement status (enabled or disabled) of the
     /// sending queue.
     pub fn subscribe_status(&self) -> Subscriber<bool> {
-        self.client.inner.sending_queue.enabled.subscribe()
+        self.client.inner.sending_queue_data.globally_enabled.subscribe()
    }
 }
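
A caller could track the client-wide toggle through `subscribe_status()`. Here is a sketch under two assumptions not shown in this diff: the client exposes this `SendingQueue` through an accessor (called `sending_queue()` below, which is hypothetical), and eyeball's `Subscriber::next` is the await point for updates:

    // Hypothetical accessor: `client.sending_queue()` is assumed to hand out
    // the `SendingQueue` shown in this diff.
    async fn log_queue_status(client: &matrix_sdk::Client) {
        let queue = client.sending_queue();
        println!("enabled right now: {}", queue.is_enabled());

        // The `Subscriber<bool>` yields a value on every enable/disable change.
        let mut status = queue.subscribe_status();
        while let Some(enabled) = status.next().await {
            println!("sending queue is now {}", if enabled { "enabled" } else { "disabled" });
        }
    }
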

@@ -130,19 +130,19 @@ pub(super) struct SendingQueueData {
     rooms: SyncRwLock<BTreeMap<OwnedRoomId, RoomSendingQueue>>,
 
     /// Is the whole mechanism enabled or disabled?
-    enabled: SharedObservable<bool>,
+    globally_enabled: SharedObservable<bool>,
 
-    /// Are we shutting down the entire queue?
-    shutting_down: Arc<AtomicBool>,
+    /// Are we currently dropping the Client?
+    is_dropping: Arc<AtomicBool>,
 }
 
 impl SendingQueueData {
     /// Create the data for a sending queue, in the given enabled state.
-    pub fn new(enabled: bool) -> Self {
+    pub fn new(globally_enabled: bool) -> Self {
         Self {
             rooms: Default::default(),
-            enabled: SharedObservable::new(enabled),
-            shutting_down: Arc::new(false.into()),
+            globally_enabled: SharedObservable::new(globally_enabled),
+            is_dropping: Arc::new(false.into()),
         }
     }
 }
@@ -151,8 +151,8 @@ impl Drop for SendingQueueData {
     fn drop(&mut self) {
         // Mark the whole sending queue as shutting down, then wake up all the room
         // queues so they're stopped too.
-        debug!("globally shutting down the sending queue");
-        self.shutting_down.store(true, Ordering::SeqCst);
+        debug!("globally dropping the sending queue");
+        self.is_dropping.store(true, Ordering::SeqCst);
 
         let rooms = self.rooms.read().unwrap();
         for room in rooms.values() {
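
The renamed `is_dropping` flag is a conventional cooperative-shutdown pattern: `Drop` raises a shared `AtomicBool`, then wakes the sleeping tasks so they observe the flag and exit. A self-contained sketch of the idea (names and structure are illustrative, not the SDK's exact types):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;

    use tokio::sync::Notify;

    struct QueueData {
        is_dropping: Arc<AtomicBool>,
        notifier: Arc<Notify>,
    }

    impl Drop for QueueData {
        fn drop(&mut self) {
            // Raise the flag first, then wake the sleeper so it sees the flag.
            self.is_dropping.store(true, Ordering::SeqCst);
            self.notifier.notify_one();
        }
    }

    async fn background_task(is_dropping: Arc<AtomicBool>, notifier: Arc<Notify>) {
        loop {
            if is_dropping.load(Ordering::SeqCst) {
                break; // the owner is being dropped, stop cleanly
            }
            notifier.notified().await; // sleep until explicitly woken
        }
    }
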
@@ -184,14 +184,14 @@ impl std::fmt::Debug for RoomSendingQueue {
 
 impl RoomSendingQueue {
     fn new(
-        enabled: SharedObservable<bool>,
-        shutting_down: Arc<AtomicBool>,
+        globally_enabled: SharedObservable<bool>,
+        is_dropping: Arc<AtomicBool>,
         client: &Client,
         room_id: OwnedRoomId,
     ) -> Self {
         let (updates_sender, _) = broadcast::channel(32);
 
-        let queue = SharedQueue::new();
+        let queue = QueueStorage::new();
         let notifier = Arc::new(Notify::new());
 
         let weak_room = WeakRoom::new(WeakClient::from_client(client), room_id);
@@ -201,8 +201,8 @@ impl RoomSendingQueue {
             queue.clone(),
             notifier.clone(),
             updates_sender.clone(),
-            enabled,
-            shutting_down,
+            globally_enabled,
+            is_dropping,
         ));
 
         Self {
@@ -242,6 +242,7 @@ impl RoomSendingQueue {
 
         let transaction_id = self.inner.queue.push(content.clone()).await;
         trace!(%transaction_id, "manager sends an event to the background task");
+
         self.inner.notifier.notify_one();
 
         let _ = self.inner.updates.send(RoomSendingQueueUpdate::NewLocalEvent(LocalEcho {
@@ -261,29 +262,29 @@ impl RoomSendingQueue {
     #[instrument(skip_all, fields(room_id = %room.room_id()))]
     async fn sending_task(
         room: WeakRoom,
-        queue: SharedQueue,
+        queue: QueueStorage,
         notifier: Arc<Notify>,
         updates: broadcast::Sender<RoomSendingQueueUpdate>,
-        enabled: SharedObservable<bool>,
-        shutting_down: Arc<AtomicBool>,
+        globally_enabled: SharedObservable<bool>,
+        is_dropping: Arc<AtomicBool>,
     ) {
         info!("spawned the sending task");
 
         loop {
             // A request to shut down should be preferred above everything else.
-            if shutting_down.load(Ordering::SeqCst) {
+            if is_dropping.load(Ordering::SeqCst) {
                 trace!("shutting down!");
                 break;
             }
 
-            if !enabled.get() {
+            if !globally_enabled.get() {
                 trace!("not enabled, sleeping");
                 // Wait for an explicit wakeup.
                 notifier.notified().await;
                 continue;
             }
 
-            let Some(queued_event) = queue.pop_next_to_send().await else {
+            let Some(queued_event) = queue.peek_next_to_send().await else {
                 trace!("queue is empty, sleeping");
                 // Wait for an explicit wakeup.
                 notifier.notified().await;
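
One detail that makes this loop correct: `tokio::sync::Notify::notify_one` stores a permit when no task is currently waiting, so a wake-up that races with the `is_dropping` and `globally_enabled` checks is not lost, the next `notified().await` returns immediately. A minimal demonstration of that permit behavior:

    use tokio::sync::Notify;

    #[tokio::main]
    async fn main() {
        let notify = Notify::new();

        // Notify before anyone is waiting: a permit is stored...
        notify.notify_one();

        // ...and the next `notified().await` consumes it instead of hanging.
        notify.notified().await;
        println!("woken by the permit stored earlier");
    }
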
@@ -293,7 +294,7 @@ impl RoomSendingQueue {
             trace!("received an event to send!");
 
             let Some(room) = room.get() else {
-                if shutting_down.load(Ordering::SeqCst) {
+                if is_dropping.load(Ordering::SeqCst) {
                     break;
                 }
                 error!("the weak room couldn't be upgraded but we're not shutting down?");
@@ -322,7 +323,7 @@ impl RoomSendingQueue {
 
                     // Disable the queue after an error.
                     // See comment in [`SendingQueue::disable()`].
-                    enabled.set(false);
+                    globally_enabled.set(false);
 
                     // In this case, we intentionally keep the event in the queue, but mark it as
                     // not being sent anymore.
@@ -360,10 +361,10 @@ struct RoomSendingQueueInner {
     /// content / deleting entries, all that will be required will be to
     /// manipulate the on-disk storage. In other words, the storage will become
     /// the one source of truth.
-    queue: SharedQueue,
+    queue: QueueStorage,
 
     /// A notifier that's updated any time common data is touched (stopped or
-    /// enabled statuses), or the associated room [`SharedQueue`].
+    /// enabled statuses), or the associated room [`QueueStorage`].
     notifier: Arc<Notify>,
 
     /// Handle to the actual sending task. Unused, but kept alive along this
@@ -375,13 +376,18 @@ struct RoomSendingQueueInner {
 struct QueuedEvent {
     event: AnyMessageLikeEventContent,
     transaction_id: OwnedTransactionId,
+
+    /// Flag to indicate if an event has been scheduled for sending.
+    ///
+    /// Useful to indicate if cancelling could happen or if it was too late and
+    /// the event had already been sent.
     is_being_sent: bool,
 }
 
 #[derive(Clone)]
-struct SharedQueue(Arc<RwLock<VecDeque<QueuedEvent>>>);
+struct QueueStorage(Arc<RwLock<VecDeque<QueuedEvent>>>);
 
-impl SharedQueue {
+impl QueueStorage {
     /// Create a new synchronized queue for queuing events to be sent later.
     fn new() -> Self {
         Self(Arc::new(RwLock::new(VecDeque::with_capacity(16))))
@@ -402,21 +408,23 @@ impl SharedQueue {
         transaction_id
     }
 
-    /// Pops the next event to be sent, marking it as being sent.
+    /// Peeks the next event to be sent, marking it as being sent.
     ///
     /// It is required to call [`Self::mark_as_sent`] after it's been
     /// effectively sent.
-    async fn pop_next_to_send(&self) -> Option<QueuedEvent> {
+    async fn peek_next_to_send(&self) -> Option<QueuedEvent> {
         let mut q = self.0.write().await;
         if let Some(event) = q.front_mut() {
+            // TODO: This flag should probably live in memory when we have an actual
+            // storage.
             event.is_being_sent = true;
             Some(event.clone())
         } else {
             None
         }
     }
 
-    /// Marks an event popped with [`Self::pop_next_to_send`] and identified
+    /// Marks an event popped with [`Self::peek_next_to_send`] and identified
     /// with the given transaction id as not being sent anymore, so it can
     /// be removed from the queue later.
     async fn mark_as_not_being_sent(&self, transaction_id: &TransactionId) {
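
The rename from `pop_next_to_send` matches what the method really does: the event stays at the front of the queue, flagged `is_being_sent`, until `mark_as_sent` removes it (or `mark_as_not_being_sent` clears the flag after a failure), so a failed send never loses the event. A reduced sketch of that lifecycle on a plain `VecDeque` (simplified, without the SDK's locking or real types):

    use std::collections::VecDeque;

    #[derive(Clone)]
    struct QueuedEvent {
        transaction_id: String,
        is_being_sent: bool,
    }

    // Peek: clone the front entry and flag it, but leave it queued so an
    // error cannot lose it.
    fn peek_next_to_send(q: &mut VecDeque<QueuedEvent>) -> Option<QueuedEvent> {
        let event = q.front_mut()?;
        event.is_being_sent = true;
        Some(event.clone())
    }

    // Only once the send is acknowledged is the entry actually removed.
    fn mark_as_sent(q: &mut VecDeque<QueuedEvent>, txn_id: &str) {
        if let Some(index) = q.iter().position(|e| e.transaction_id == txn_id) {
            q.remove(index);
        }
    }
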
@@ -432,7 +440,11 @@ impl SharedQueue {
     /// transaction id as sent by removing it from the local queue.
     async fn mark_as_sent(&self, transaction_id: &TransactionId) {
         let mut q = self.0.write().await;
-        q.retain(|item| item.transaction_id != transaction_id);
+        if let Some(index) = q.iter().position(|item| item.transaction_id == transaction_id) {
+            q.remove(index);
+        } else {
+            warn!("couldn't find item to mark as sent with transaction id {transaction_id}");
+        }
     }
 
     /// Cancel a sending command for an event that has been sent with
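
The switch from `retain` to `position` + `remove` keeps the removal behavior for the found case but makes the not-found case observable: `retain` silently does nothing when no element matches, while `position` returns `None` and lets the code emit the warning above. A small standard-library illustration:

    use std::collections::VecDeque;

    fn main() {
        let mut q: VecDeque<&str> = VecDeque::from(["a", "b", "c"]);

        // `retain` removes matching items but cannot report "nothing matched".
        q.retain(|&id| id != "zzz"); // silently a no-op here

        // `position` + `remove` distinguishes the two outcomes.
        match q.iter().position(|&id| id == "b") {
            Some(index) => {
                q.remove(index);
                println!("removed, queue is now {q:?}");
            }
            None => eprintln!("couldn't find item to remove"),
        }
    }
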
