Skip to content
This repository has been archived by the owner on Jun 18, 2024. It is now read-only.

scx: Cosmetic changes from patch split #187

Merged
merged 1 commit into from
Apr 25, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 17 additions & 19 deletions kernel/sched/ext.c
Original file line number Diff line number Diff line change
Expand Up @@ -121,10 +121,10 @@ enum scx_ops_flags {
*/
SCX_OPS_CGROUP_KNOB_WEIGHT = 1LLU << 16, /* cpu.weight */

SCX_OPS_ALL_FLAGS = SCX_OPS_SWITCH_PARTIAL |
SCX_OPS_KEEP_BUILTIN_IDLE |
SCX_OPS_ALL_FLAGS = SCX_OPS_KEEP_BUILTIN_IDLE |
SCX_OPS_ENQ_LAST |
SCX_OPS_ENQ_EXITING |
SCX_OPS_SWITCH_PARTIAL |
SCX_OPS_CGROUP_KNOB_WEIGHT,
};

Expand Down Expand Up @@ -155,13 +155,13 @@ struct scx_cgroup_init_args {

enum scx_cpu_preempt_reason {
/* next task is being scheduled by &sched_class_rt */
SCX_CPU_PREEMPT_RT,
SCX_CPU_PREEMPT_RT,
/* next task is being scheduled by &sched_class_dl */
SCX_CPU_PREEMPT_DL,
SCX_CPU_PREEMPT_DL,
/* next task is being scheduled by &sched_class_stop */
SCX_CPU_PREEMPT_STOP,
SCX_CPU_PREEMPT_STOP,
/* unknown reason for SCX being preempted */
SCX_CPU_PREEMPT_UNKNOWN,
SCX_CPU_PREEMPT_UNKNOWN,
};

/*
Expand Down Expand Up @@ -920,11 +920,11 @@ static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
s64 exit_code,
const char *fmt, ...);

#define scx_ops_error_kind(__err, fmt, args...) \
scx_ops_exit_kind(__err, 0, fmt, ##args)
#define scx_ops_error_kind(err, fmt, args...) \
scx_ops_exit_kind((err), 0, fmt, ##args)

#define scx_ops_exit(__code, fmt, args...) \
scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, __code, fmt, ##args)
#define scx_ops_exit(code, fmt, args...) \
scx_ops_exit_kind(SCX_EXIT_UNREG_KERN, (code), fmt, ##args)

#define scx_ops_error(fmt, args...) \
scx_ops_error_kind(SCX_EXIT_ERROR, fmt, ##args)
Expand Down Expand Up @@ -1537,7 +1537,7 @@ static void dispatch_enqueue(struct scx_dispatch_q *dsq, struct task_struct *p,
list_add_tail(&p->scx.dsq_node.list, &dsq->list);
}

/* seq records the order tasks are queued, used by BPF iterations */
/* seq records the order tasks are queued, used by BPF DSQ iterator */
dsq->seq++;
p->scx.dsq_seq = dsq->seq;

Expand Down Expand Up @@ -4561,15 +4561,15 @@ static __printf(3, 4) void scx_ops_exit_kind(enum scx_exit_kind kind,
if (!atomic_try_cmpxchg(&scx_exit_kind, &none, kind))
return;

ei->exit_code = exit_code;

if (kind >= SCX_EXIT_ERROR)
ei->bt_len = stack_trace_save(ei->bt, SCX_EXIT_BT_LEN, 1);

va_start(args, fmt);
vscnprintf(ei->msg, SCX_EXIT_MSG_LEN, fmt, args);
va_end(args);

ei->exit_code = exit_code;

/*
* Set ei->kind and ->reason for scx_dump_state(). They'll be set again
* in scx_ops_disable_workfn().
Expand Down Expand Up @@ -6152,7 +6152,6 @@ static void bpf_exit_bstr_common(enum scx_exit_kind kind, s64 exit_code,
unsigned long flags;
int ret;

BUILD_BUG_ON(sizeof(enum scx_exit_code) != sizeof_field(struct scx_exit_info, exit_code));
local_irq_save(flags);
bufs = this_cpu_ptr(&scx_bpf_error_bstr_bufs);

Expand Down Expand Up @@ -6389,11 +6388,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
/*
* Empty function body because we aren't actually acquiring or
* releasing a reference to a global idle cpumask, which is read-only
* in the caller and is never released. The acquire / release semantics
* here are just used to make the cpumask a trusted pointer in the
* caller.
* Empty function body because we aren't actually acquiring or releasing
* a reference to a global idle cpumask, which is read-only in the
* caller and is never released. The acquire / release semantics here
* are just used to make the cpumask a trusted pointer in the caller.
*/
}

Expand Down
19 changes: 9 additions & 10 deletions kernel/sched/ext.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,17 +33,17 @@ static inline bool task_on_scx(const struct task_struct *p)
return scx_enabled() && p->sched_class == &ext_sched_class;
}

bool task_should_scx(struct task_struct *p);
void scx_next_task_picked(struct rq *rq, struct task_struct *p,
const struct sched_class *active);
void scx_tick(struct rq *rq);
void init_scx_entity(struct sched_ext_entity *scx);
void scx_pre_fork(struct task_struct *p);
int scx_fork(struct task_struct *p);
void scx_post_fork(struct task_struct *p);
void scx_cancel_fork(struct task_struct *p);
int scx_check_setscheduler(struct task_struct *p, int policy);
bool scx_can_stop_tick(struct rq *rq);
void scx_tick(struct rq *rq);
void scx_next_task_picked(struct rq *rq, struct task_struct *p,
const struct sched_class *active);
bool task_should_scx(struct task_struct *p);
void init_sched_ext_class(void);

static inline u32 scx_cpuperf_target(s32 cpu)
Expand Down Expand Up @@ -88,18 +88,17 @@ bool scx_prio_less(const struct task_struct *a, const struct task_struct *b,
#define scx_enabled() false
#define scx_switched_all() false

static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void scx_next_task_picked(struct rq *rq, struct task_struct *p,
const struct sched_class *active) {}
static inline void scx_tick(struct rq *rq) {}
static inline void init_scx_entity(struct sched_ext_entity *scx) {}
static inline void scx_pre_fork(struct task_struct *p) {}
static inline int scx_fork(struct task_struct *p) { return 0; }
static inline void scx_post_fork(struct task_struct *p) {}
static inline void scx_cancel_fork(struct task_struct *p) {}
static inline int scx_check_setscheduler(struct task_struct *p,
int policy) { return 0; }
static inline int scx_check_setscheduler(struct task_struct *p, int policy) { return 0; }
static inline bool scx_can_stop_tick(struct rq *rq) { return true; }
static inline void scx_tick(struct rq *rq) {}
static inline void scx_next_task_picked(struct rq *rq, struct task_struct *p,
const struct sched_class *active) {}
static inline bool task_on_scx(const struct task_struct *p) { return false; }
static inline void init_sched_ext_class(void) {}
static inline u32 scx_cpuperf_target(s32 cpu) { return 0; }

Expand Down
16 changes: 8 additions & 8 deletions tools/sched_ext/include/scx/common.bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -62,8 +62,11 @@ bool scx_bpf_task_running(const struct task_struct *p) __ksym;
s32 scx_bpf_task_cpu(const struct task_struct *p) __ksym;
struct cgroup *scx_bpf_task_cgroup(struct task_struct *p) __ksym;

static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_exit_format_checker(const char *fmt, ...) {}
/*
 * Use the following as @it when calling scx_bpf_consume_task() from within
* bpf_for_each() loops.
*/
#define BPF_FOR_EACH_ITER (&___it)

/* hopefully temporary wrapper to work around BPF restriction */
static inline bool scx_bpf_consume_task(struct bpf_iter_scx_dsq *it,
Expand All @@ -74,11 +77,8 @@ static inline bool scx_bpf_consume_task(struct bpf_iter_scx_dsq *it,
return __scx_bpf_consume_task(ptr, p);
}

/*
 * Use the following as @it when calling scx_bpf_consume_task() from within
* bpf_for_each() loops.
*/
#define BPF_FOR_EACH_ITER (&___it)
static inline __attribute__((format(printf, 1, 2)))
void ___scx_bpf_exit_format_checker(const char *fmt, ...) {}

/*
* Helper macro for initializing the fmt and variadic argument inputs to both
Expand Down Expand Up @@ -250,7 +250,7 @@ int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
Expand Down
4 changes: 2 additions & 2 deletions tools/sched_ext/include/scx/compat.h
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ static inline long scx_hotplug_seq(void)
* o If nonzero and running on an older kernel, the value is set to zero
* and a warning is emitted
*
* - sched_ext_ops.hotplug_sqn
* - sched_ext_ops.hotplug_seq
* o If nonzero and running on an older kernel, the scheduler will fail to
* load
*/
Expand All @@ -163,7 +163,7 @@ static inline long scx_hotplug_seq(void)
if (__COMPAT_struct_has_field("sched_ext_ops", "exit_dump_len") && \
(__skel)->struct_ops.__ops_name->exit_dump_len) { \
fprintf(stderr, "WARNING: kernel doesn't support setting exit dump len\n"); \
(__skel)->struct_ops.__ops_name->exit_dump_len = 0; \
(__skel)->struct_ops.__ops_name->exit_dump_len = 0; \
} \
SCX_BUG_ON(__scx_name##__load((__skel)), "Failed to load skel"); \
})
Expand Down
3 changes: 1 addition & 2 deletions tools/sched_ext/scx_central.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,8 +46,7 @@ int main(int argc, char **argv)

libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

skel = scx_central__open();
SCX_BUG_ON(!skel, "Failed to open skel");
skel = SCX_OPS_OPEN(central_ops, scx_central);

skel->rodata->central_cpu = 0;
skel->rodata->nr_cpu_ids = libbpf_num_possible_cpus();
Expand Down
3 changes: 1 addition & 2 deletions tools/sched_ext/scx_flatcg.c
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,7 @@ int main(int argc, char **argv)

libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

skel = scx_flatcg__open();
SCX_BUG_ON(!skel, "Failed to open skel");
skel = SCX_OPS_OPEN(flatcg_ops, scx_flatcg);

skel->rodata->nr_cpus = libbpf_num_possible_cpus();

Expand Down
4 changes: 2 additions & 2 deletions tools/sched_ext/scx_qmap.bpf.c
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,6 @@ static bool consume_shared_dsq(void)
{
struct task_struct *p;
bool consumed;
s32 i;

if (exp_prefix[0] == '\0')
return scx_bpf_consume(SHARED_DSQ);
Expand All @@ -283,7 +282,8 @@ static bool consume_shared_dsq(void)

memcpy(comm, p->comm, sizeof(exp_prefix) - 1);

if (!bpf_strncmp(comm, sizeof(exp_prefix), exp_prefix) &&
if (!bpf_strncmp(comm, sizeof(exp_prefix),
(const char *)exp_prefix) &&
scx_bpf_consume_task(BPF_FOR_EACH_ITER, p)) {
consumed = true;
__sync_fetch_and_add(&nr_expedited, 1);
Expand Down
7 changes: 3 additions & 4 deletions tools/sched_ext/scx_qmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,8 @@ const char help_fmt[] =
"\n"
"See the top-level comment in .bpf.c for more details.\n"
"\n"
"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-d PID]\n"
" [-D LEN] [-p]\n"
"Usage: %s [-s SLICE_US] [-e COUNT] [-t COUNT] [-T COUNT] [-l COUNT] [-b COUNT]\n"
" [-P] [-E PREFIX] [-d PID] [-D LEN] [-p]\n"
"\n"
" -s SLICE_US Override slice duration\n"
" -e COUNT Trigger scx_bpf_error() after COUNT enqueues\n"
Expand Down Expand Up @@ -54,8 +54,7 @@ int main(int argc, char **argv)

libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

skel = scx_qmap__open();
SCX_BUG_ON(!skel, "Failed to open skel");
skel = SCX_OPS_OPEN(qmap_ops, scx_qmap);

while ((opt = getopt(argc, argv, "s:e:t:T:l:b:PE:d:D:ph")) != -1) {
switch (opt) {
Expand Down
3 changes: 1 addition & 2 deletions tools/sched_ext/scx_simple.c
Original file line number Diff line number Diff line change
Expand Up @@ -60,8 +60,7 @@ int main(int argc, char **argv)

libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

skel = scx_simple__open();
SCX_BUG_ON(!skel, "Failed to open skel");
skel = SCX_OPS_OPEN(simple_ops, scx_simple);

while ((opt = getopt(argc, argv, "fh")) != -1) {
switch (opt) {
Expand Down