@@ -65,7 +65,7 @@ bool EventLoopRef::reset(std::unique_lock<std::mutex>* lock)
     return done;
 }
 
-ProxyContext::ProxyContext(Connection* connection) : connection(connection), loop{&connection->m_loop} {}
+ProxyContext::ProxyContext(Connection* connection) : connection(connection), loop{*connection->m_loop} {}
 
 Connection::~Connection()
 {
@@ -122,18 +122,18 @@ Connection::~Connection()
         m_sync_cleanup_fns.pop_front();
     }
     while (!m_async_cleanup_fns.empty()) {
-        const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
-        m_loop.m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
+        const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+        m_loop->m_async_fns.emplace_back(std::move(m_async_cleanup_fns.front()));
         m_async_cleanup_fns.pop_front();
     }
-    std::unique_lock<std::mutex> lock(m_loop.m_mutex);
-    m_loop.startAsyncThread(lock);
-    m_loop.removeClient(lock);
+    std::unique_lock<std::mutex> lock(m_loop->m_mutex);
+    m_loop->startAsyncThread(lock);
+    m_loop.reset(&lock);
 }
 
 CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     // Add cleanup callbacks to the front of list, so sync cleanup functions run
     // in LIFO order. This is a good approach because sync cleanup functions are
     // added as client objects are created, and it is natural to clean up
@@ -147,13 +147,13 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 
 void Connection::removeSyncCleanup(CleanupIt it)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     m_sync_cleanup_fns.erase(it);
 }
 
 void Connection::addAsyncCleanup(std::function<void()> fn)
 {
-    const std::unique_lock<std::mutex> lock(m_loop.m_mutex);
+    const std::unique_lock<std::mutex> lock(m_loop->m_mutex);
     // Add async cleanup callbacks to the back of the list. Unlike the sync
     // cleanup list, this list order is more significant because it determines
     // the order server objects are destroyed when there is a sudden disconnect,
@@ -244,7 +244,7 @@ void EventLoop::post(const std::function<void()>& fn)
         return;
     }
     std::unique_lock<std::mutex> lock(m_mutex);
-    addClient(lock);
+    EventLoopRef ref(*this, &lock);
     m_cv.wait(lock, [this] { return m_post_fn == nullptr; });
     m_post_fn = &fn;
     int post_fd{m_post_fd};
@@ -253,13 +253,13 @@ void EventLoop::post(const std::function<void()>& fn)
         KJ_SYSCALL(write(post_fd, &buffer, 1));
     });
     m_cv.wait(lock, [this, &fn] { return m_post_fn != &fn; });
-    removeClient(lock);
 }
 
 void EventLoop::addClient(std::unique_lock<std::mutex>& lock) { m_num_clients += 1; }
 
 bool EventLoop::removeClient(std::unique_lock<std::mutex>& lock)
 {
+    assert(m_num_clients > 0);
     m_num_clients -= 1;
     if (done(lock)) {
         m_cv.notify_all();
@@ -279,16 +279,22 @@ void EventLoop::startAsyncThread(std::unique_lock<std::mutex>& lock)
     } else if (!m_async_fns.empty()) {
         m_async_thread = std::thread([this] {
             std::unique_lock<std::mutex> lock(m_mutex);
-            while (true) {
+            while (!done(lock)) {
                 if (!m_async_fns.empty()) {
-                    addClient(lock);
+                    EventLoopRef ref{*this, &lock};
                     const std::function<void()> fn = std::move(m_async_fns.front());
                     m_async_fns.pop_front();
                     Unlock(lock, fn);
-                    if (removeClient(lock)) break;
+                    // Important to explicitly call ref.reset() here and
+                    // explicitly break if the EventLoop is done, not relying on
+                    // the while condition above. The reason is that the end of
+                    // the `ref` lifetime can cause EventLoop::loop() to exit,
+                    // and if there is external code that immediately deletes
+                    // the EventLoop object as soon as EventLoop::loop()
+                    // returns, checking the while condition may crash.
+                    if (ref.reset()) break;
+                    // Continue without waiting in case there are more async_fns
                     continue;
-                } else if (m_num_clients == 0) {
-                    break;
                 }
                 m_cv.wait(lock);
             }
@@ -394,7 +400,7 @@ kj::Promise<void> ProxyServer<ThreadMap>::makeThread(MakeThreadContext context)
     const std::string from = context.getParams().getName();
     std::promise<ThreadContext*> thread_context;
     std::thread thread([&thread_context, from, this]() {
-        g_thread_context.thread_name = ThreadName(m_connection.m_loop.m_exe_name) + " (from " + from + ")";
+        g_thread_context.thread_name = ThreadName(m_connection.m_loop->m_exe_name) + " (from " + from + ")";
         g_thread_context.waiter = std::make_unique<Waiter>();
        thread_context.set_value(&g_thread_context);
        std::unique_lock<std::mutex> lock(g_thread_context.waiter->m_mutex);
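
For context on the new `EventLoopRef` type used above: the diff only shows how it is used (`EventLoopRef ref{*this, &lock}`, `ref.reset()`, and `m_loop->`/`*m_loop` access), plus the `bool EventLoopRef::reset(std::unique_lock<std::mutex>* lock)` signature visible in the first hunk header. A minimal sketch consistent with that usage might look like the following; the real class definition lives in the project headers, and everything beyond the calls visible in the diff (the `EventLoop` stub, the default arguments, the stored `m_lock` member) is an assumption for illustration only.

```cpp
#include <cassert>
#include <mutex>

// Minimal stand-in for the real EventLoop, limited to the members this
// sketch needs; addClient/removeClient mirror the definitions in the diff.
struct EventLoop {
    std::mutex m_mutex;
    int m_num_clients{0};
    void addClient(std::unique_lock<std::mutex>& lock) { m_num_clients += 1; }
    bool removeClient(std::unique_lock<std::mutex>& lock)
    {
        assert(m_num_clients > 0);
        m_num_clients -= 1;
        return m_num_clients == 0; // the real done(lock) check is broader
    }
};

// Hypothetical RAII wrapper: the constructor registers a client reference,
// and reset() (or the destructor) releases it, returning whether the loop
// has become "done" and may shut down.
class EventLoopRef
{
public:
    // If lock is non-null it must already hold loop.m_mutex; the pointer is
    // retained so a later no-argument reset() can reuse the caller's lock
    // instead of re-locking (which would deadlock on a non-recursive mutex).
    explicit EventLoopRef(EventLoop& loop, std::unique_lock<std::mutex>* lock = nullptr)
        : m_loop(&loop), m_lock(lock)
    {
        if (m_lock) {
            m_loop->addClient(*m_lock);
        } else {
            std::unique_lock<std::mutex> new_lock(m_loop->m_mutex);
            m_loop->addClient(new_lock);
        }
    }
    EventLoopRef(const EventLoopRef&) = delete;
    EventLoopRef& operator=(const EventLoopRef&) = delete;
    ~EventLoopRef() { reset(); }

    // Drop the reference if still held; returns true when the loop is done.
    bool reset(std::unique_lock<std::mutex>* lock = nullptr)
    {
        bool done = false;
        if (EventLoop* loop = m_loop) {
            m_loop = nullptr;
            if (std::unique_lock<std::mutex>* held = lock ? lock : m_lock) {
                done = loop->removeClient(*held);
            } else {
                std::unique_lock<std::mutex> new_lock(loop->m_mutex);
                done = loop->removeClient(new_lock);
            }
        }
        return done;
    }

    EventLoop& operator*() const { return *m_loop; }
    EventLoop* operator->() const { return m_loop; }

private:
    EventLoop* m_loop;
    std::unique_lock<std::mutex>* m_lock;
};
```

With a shape like this, the call sites above read naturally: `Connection::~Connection` releases its loop reference using the lock it already holds (`m_loop.reset(&lock)`), `EventLoop::post` gets balanced addClient/removeClient calls for free from `ref`'s scope, and the async thread can release its reference and test for shutdown in a single step with `ref.reset()` before touching any `EventLoop` state again.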