/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
        struct list_head        pwqs;           /* WR: all pwqs of this wq */
        struct list_head        list;           /* PR: list of all workqueues */
        ...
        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
        ...
};
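As a concrete anchor for the struct above, here is a minimal sketch of a module using the external API that operates on it; the queue name "hello_wq" and the work function are made up for illustration, while alloc_workqueue(), queue_work() and destroy_workqueue() are the real entry points:

#include <linux/module.h>
#include <linux/workqueue.h>

static void hello_fn(struct work_struct *work)
{
        pr_info("work item ran\n");
}

static DECLARE_WORK(hello_work, hello_fn);
static struct workqueue_struct *hello_wq;

static int __init hello_init(void)
{
        /* allocates the workqueue_struct along with its per-cpu pwqs */
        hello_wq = alloc_workqueue("hello_wq", 0, 0);
        if (!hello_wq)
                return -ENOMEM;

        /* the wq relays hello_work to a worker_pool via one of its pwqs */
        queue_work(hello_wq, &hello_work);
        return 0;
}

static void __exit hello_exit(void)
{
        destroy_workqueue(hello_wq);
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");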
Pool workqueue: struct pool_workqueue
/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 * number of flag bits.
 */
struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        ...
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        ...
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
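The __aligned() annotation is what makes the packing described in the comment possible. A simplified sketch of the decoding step, assuming the kernel's mask definitions (the real get_work_pwq() in kernel/workqueue.c additionally verifies the WORK_STRUCT_PWQ flag before trusting the pointer):

#define WORK_STRUCT_FLAG_MASK           ((1UL << WORK_STRUCT_FLAG_BITS) - 1)
#define WORK_STRUCT_WQ_DATA_MASK        (~WORK_STRUCT_FLAG_MASK)

static struct pool_workqueue *get_work_pwq_sketch(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        /*
         * Because a pwq is aligned to 1 << WORK_STRUCT_FLAG_BITS, the low
         * bits of its address are always zero while queued, so they can
         * carry flags; masking them off recovers the pwq pointer.
         */
        return (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
}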
Worker pool: struct worker_pool
struct worker_pool {
        spinlock_t              lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
        ...
        struct list_head        worklist;       /* L: list of pending works */
        int                     nr_workers;     /* L: total number of workers */
        ...
        /* see manage_workers() for details on the two manager mutexes */
        struct mutex            manager_arb;    /* manager arbitration */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
        ...
        int                     refcnt;         /* PL: refcnt for unbound pools */

        /*
         * The current concurrency level.  As it's likely to be accessed
         * from other CPUs during try_to_wake_up(), put it in a separate
         * cacheline.
         */
        atomic_t                nr_running ____cacheline_aligned_in_smp;
        ...
} ____cacheline_aligned_in_smp;
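The worklist/nr_running pair is the heart of the pool's concurrency management. A hedged sketch of the decision it enables, modeled on need_more_worker() from kernels of this vintage (where nr_running is still an atomic_t):

static bool need_more_worker_sketch(struct worker_pool *pool)
{
        /*
         * Work is pending but no worker bound to this pool is currently
         * running (e.g. the last one blocked), so the pool should wake
         * an idle worker or have the manager create a new one.
         */
        return !list_empty(&pool->worklist) &&
               !atomic_read(&pool->nr_running);
}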
Worker: struct worker
/*
 * The poor guys doing the actual heavy lifting.  All on-duty workers are
 * either serving the manager role, on idle list or on busy hash.  For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
        /* on idle list while idle, on busy hash table while busy */
        union {
                struct list_head        entry;  /* L: while idle */
                struct hlist_node       hentry; /* L: while busy */
        };

        struct work_struct      *current_work;  /* K: work being processed and its */
        work_func_t             current_func;   /* K: function */
        struct pool_workqueue   *current_pwq;   /* K: pwq */
        u64                     current_at;     /* K: runtime at start or last wakeup */
        unsigned int            current_color;  /* K: color */

        int                     sleeping;       /* S: is worker sleeping? */

        /* used by the scheduler to determine a worker's last known identity */
        work_func_t             last_func;      /* K: last work's fn */

        struct list_head        scheduled;      /* L: scheduled works */

        struct task_struct      *task;          /* I: worker task */
        struct worker_pool      *pool;          /* A: the associated pool */
                                                /* L: for rescuers */
        struct list_head        node;           /* A: anchored at pool->workers */
                                                /* A: runs through worker->node */

        unsigned long           last_active;    /* K: last active timestamp */
        unsigned int            flags;          /* L: flags */
        int                     id;             /* I: worker id */

        /*
         * Opaque string set with work_set_desc().  Printed out with task
         * dump for debugging - WARN, BUG, panic or sysrq.
         */
        char                    desc[WORKER_DESC_LEN];

        /* used only by rescuers to point to the target workqueue */
        struct workqueue_struct *rescue_wq;     /* I: the workqueue to rescue */
};
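The rescue_wq field is only meaningful for rescuer workers. A workqueue created with WQ_MEM_RECLAIM gets a dedicated rescuer whose rescue_wq points back at it, guaranteeing forward progress when worker creation stalls under memory pressure; a minimal sketch follows (the name "reclaim_wq" is hypothetical, the flag is real):

#include <linux/workqueue.h>

static struct workqueue_struct *reclaim_wq;

static int setup_reclaim_wq(void)
{
        /*
         * WQ_MEM_RECLAIM makes alloc_workqueue() spawn a rescuer thread;
         * that worker's ->rescue_wq points back at reclaim_wq so it can
         * drain the queue when the regular pools cannot make progress.
         */
        reclaim_wq = alloc_workqueue("reclaim_wq", WQ_MEM_RECLAIM, 0);
        return reclaim_wq ? 0 : -ENOMEM;
}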