Skip to content

Commit

Permalink
system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
Browse files Browse the repository at this point in the history
The Big QEMU Lock (BQL) has many names and they are confusing. The
actual QemuMutex variable is called qemu_global_mutex but it's commonly
referred to as the BQL in discussions and some code comments. The
locking APIs, however, are called qemu_mutex_lock_iothread() and
qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was
split into KVM vcpu threads and the "iothread" (now called the main
loop thread). I have contributed to the confusion myself by introducing
a separate --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the
locking APIs to:
- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches
will rename them. There are also comments and documentation that will be
updated in later patches.

Signed-off-by: Stefan Hajnoczi <[email protected]>
Reviewed-by: Paul Durrant <[email protected]>
Acked-by: Fabiano Rosas <[email protected]>
Acked-by: David Woodhouse <[email protected]>
Reviewed-by: Cédric Le Goater <[email protected]>
Acked-by: Peter Xu <[email protected]>
Acked-by: Eric Farman <[email protected]>
Reviewed-by: Harsh Prateek Bora <[email protected]>
Acked-by: Hyman Huang <[email protected]>
Reviewed-by: Akihiko Odaki <[email protected]>
Message-id: [email protected]
Signed-off-by: Stefan Hajnoczi <[email protected]>
  • Loading branch information
stefanhaRH committed Jan 8, 2024
1 parent 897a06c commit 195801d
Show file tree
Hide file tree
Showing 95 changed files with 529 additions and 529 deletions.
10 changes: 5 additions & 5 deletions accel/accel-blocker.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ void accel_blocker_init(void)

void accel_ioctl_begin(void)
{
if (likely(qemu_mutex_iothread_locked())) {
if (likely(bql_locked())) {
return;
}

Expand All @@ -51,7 +51,7 @@ void accel_ioctl_begin(void)

void accel_ioctl_end(void)
{
if (likely(qemu_mutex_iothread_locked())) {
if (likely(bql_locked())) {
return;
}

Expand All @@ -62,7 +62,7 @@ void accel_ioctl_end(void)

void accel_cpu_ioctl_begin(CPUState *cpu)
{
if (unlikely(qemu_mutex_iothread_locked())) {
if (unlikely(bql_locked())) {
return;
}

Expand All @@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)

void accel_cpu_ioctl_end(CPUState *cpu)
{
if (unlikely(qemu_mutex_iothread_locked())) {
if (unlikely(bql_locked())) {
return;
}

Expand Down Expand Up @@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
* We allow to inhibit only when holding the BQL, so we can identify
* when an inhibitor wants to issue an ioctl easily.
*/
g_assert(qemu_mutex_iothread_locked());
g_assert(bql_locked());

/* Block further invocations of the ioctls outside the BQL. */
CPU_FOREACH(cpu) {
Expand Down
8 changes: 4 additions & 4 deletions accel/dummy-cpus.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ static void *dummy_cpu_thread_fn(void *arg)

rcu_register_thread();

qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
Expand All @@ -43,7 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);

do {
qemu_mutex_unlock_iothread();
bql_unlock();
#ifndef _WIN32
do {
int sig;
Expand All @@ -56,11 +56,11 @@ static void *dummy_cpu_thread_fn(void *arg)
#else
qemu_sem_wait(&cpu->sem);
#endif
qemu_mutex_lock_iothread();
bql_lock();
qemu_wait_io_event(cpu);
} while (!cpu->unplug);

qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}
Expand Down
4 changes: 2 additions & 2 deletions accel/hvf/hvf-accel-ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -424,7 +424,7 @@ static void *hvf_cpu_thread_fn(void *arg)

rcu_register_thread();

qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);

cpu->thread_id = qemu_get_thread_id();
Expand All @@ -449,7 +449,7 @@ static void *hvf_cpu_thread_fn(void *arg)

hvf_vcpu_destroy(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}
Expand Down
4 changes: 2 additions & 2 deletions accel/kvm/kvm-accel-ops.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)

rcu_register_thread();

qemu_mutex_lock_iothread();
bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
cpu->neg.can_do_io = true;
Expand All @@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)

kvm_destroy_vcpu(cpu);
cpu_thread_signal_destroyed(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
rcu_unregister_thread();
return NULL;
}
Expand Down
22 changes: 11 additions & 11 deletions accel/kvm/kvm-all.c
Original file line number Diff line number Diff line change
Expand Up @@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
* should always be with BQL held, serialization is guaranteed.
* However, let's be sure of it.
*/
assert(qemu_mutex_iothread_locked());
assert(bql_locked());
/*
* First make sure to flush the hardware buffers by kicking all
* vcpus out in a synchronous way.
Expand Down Expand Up @@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
trace_kvm_dirty_ring_reaper("wakeup");
r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

qemu_mutex_lock_iothread();
bql_lock();
kvm_dirty_ring_reap(s, NULL);
qemu_mutex_unlock_iothread();
bql_unlock();

r->reaper_iteration++;
}
Expand Down Expand Up @@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
return EXCP_HLT;
}

qemu_mutex_unlock_iothread();
bql_unlock();
cpu_exec_start(cpu);

do {
Expand Down Expand Up @@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)

#ifdef KVM_HAVE_MCE_INJECTION
if (unlikely(have_sigbus_pending)) {
qemu_mutex_lock_iothread();
bql_lock();
kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
pending_sigbus_addr);
have_sigbus_pending = false;
qemu_mutex_unlock_iothread();
bql_unlock();
}
#endif

Expand Down Expand Up @@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
* still full. Got kicked by KVM_RESET_DIRTY_RINGS.
*/
trace_kvm_dirty_ring_full(cpu->cpu_index);
qemu_mutex_lock_iothread();
bql_lock();
/*
* We throttle vCPU by making it sleep once it exit from kernel
* due to dirty ring full. In the dirtylimit scenario, reaping
Expand All @@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
} else {
kvm_dirty_ring_reap(kvm_state, NULL);
}
qemu_mutex_unlock_iothread();
bql_unlock();
dirtylimit_vcpu_execute(cpu);
ret = 0;
break;
Expand All @@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
break;
case KVM_SYSTEM_EVENT_CRASH:
kvm_cpu_synchronize_state(cpu);
qemu_mutex_lock_iothread();
bql_lock();
qemu_system_guest_panicked(cpu_get_crash_info(cpu));
qemu_mutex_unlock_iothread();
bql_unlock();
ret = 0;
break;
default:
Expand All @@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
} while (ret == 0);

cpu_exec_end(cpu);
qemu_mutex_lock_iothread();
bql_lock();

if (ret < 0) {
cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
Expand Down
26 changes: 13 additions & 13 deletions accel/tcg/cpu-exec.c
Original file line number Diff line number Diff line change
Expand Up @@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
tcg_ctx->gen_tb = NULL;
}
#endif
if (qemu_mutex_iothread_locked()) {
qemu_mutex_unlock_iothread();
if (bql_locked()) {
bql_unlock();
}
assert_no_pages_locked();
}
Expand Down Expand Up @@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
qemu_mutex_lock_iothread();
bql_lock();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
qemu_mutex_unlock_iothread();
bql_unlock();
}
#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
Expand Down Expand Up @@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#else
if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu);
qemu_mutex_lock_iothread();
bql_lock();
cc->tcg_ops->do_interrupt(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
cpu->exception_index = -1;

if (unlikely(cpu->singlestep_enabled)) {
Expand Down Expand Up @@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,

if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
qemu_mutex_lock_iothread();
bql_lock();
interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
Expand All @@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#if !defined(CONFIG_USER_ONLY)
Expand All @@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#if defined(TARGET_I386)
Expand All @@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
#endif /* !TARGET_I386 */
Expand All @@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
if (unlikely(cpu->singlestep_enabled)) {
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
bql_unlock();
return true;
}
cpu->exception_index = -1;
Expand All @@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}

/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
qemu_mutex_unlock_iothread();
bql_unlock();
}

/* Finally, check if we need to exit to the main loop. */
Expand Down
16 changes: 8 additions & 8 deletions accel/tcg/cputlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;

qemu_mutex_lock_iothread();
bql_lock();
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
bql_unlock();

return ret;
}
Expand All @@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;

qemu_mutex_lock_iothread();
bql_lock();
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
qemu_mutex_unlock_iothread();
bql_unlock();

return int128_make128(b, a);
}
Expand Down Expand Up @@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;

qemu_mutex_lock_iothread();
bql_lock();
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
qemu_mutex_unlock_iothread();
bql_unlock();

return ret;
}
Expand All @@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;

qemu_mutex_lock_iothread();
bql_lock();
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
qemu_mutex_unlock_iothread();
bql_unlock();

return ret;
}
Expand Down
4 changes: 2 additions & 2 deletions accel/tcg/tcg-accel-ops-icount.c
Original file line number Diff line number Diff line change
Expand Up @@ -126,9 +126,9 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
* We're called without the iothread lock, so must take it while
* we're calling timer handlers.
*/
qemu_mutex_lock_iothread();
bql_lock();
icount_notify_aio_contexts();
qemu_mutex_unlock_iothread();
bql_unlock();
}
}

Expand Down
Loading

0 comments on commit 195801d

Please sign in to comment.