This repository has been archived by the owner on Jun 18, 2024. It is now read-only.

Commit

Merge pull request #168 from sched-ext/sync-from-scx
scx_flatcg: Keep cgroup rb nodes stashed
htejun authored Mar 28, 2024
2 parents 27688fa + 79f18e5 commit e227931
Showing 2 changed files with 11 additions and 18 deletions.
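This commit stops moving scx_flatcg's per-cgroup rb node out of its stash map with bpf_kptr_xchg() and instead leaves it stashed, taking an extra reference with bpf_refcount_acquire() whenever the node goes onto the rbtree. A minimal sketch of the new pattern follows; every name in it (node_data, node_stash, stash_map, demo_lock, demo_tree, node_less, enqueue_node) is hypothetical and not part of this repository, and it assumes the kfunc declarations plus the private()/__contains() helpers from include/scx/common.bpf.h:

struct node_data {
	struct bpf_rb_node rb_node;
	struct bpf_refcount refcount;	/* makes bpf_obj_new() objects refcounted */
	__u64 key;
};

struct node_stash {
	struct node_data __kptr *node;	/* node stays stashed here for its whole lifetime */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u64);
	__type(value, struct node_stash);
} stash_map SEC(".maps");

private(DEMO) struct bpf_spin_lock demo_lock;
private(DEMO) struct bpf_rb_root demo_tree __contains(node_data, rb_node);

static bool node_less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *na = container_of(a, struct node_data, rb_node);
	struct node_data *nb = container_of(b, struct node_data, rb_node);

	return na->key < nb->key;
}

/*
 * Old pattern: bpf_kptr_xchg(&stash->node, NULL) emptied the stash while the
 * node sat on the rbtree, so the stash had to be refilled later. New pattern:
 * leave the node stashed and hand a second reference to the tree.
 */
static int enqueue_node(__u64 key)
{
	struct node_stash *stash;
	struct node_data *n;

	stash = bpf_map_lookup_elem(&stash_map, &key);
	if (!stash || !stash->node)
		return -1;

	n = bpf_refcount_acquire(stash->node);	/* new owned reference, or NULL */
	if (!n)
		return -1;			/* object is already being freed */

	bpf_spin_lock(&demo_lock);
	bpf_rbtree_add(&demo_tree, &n->rb_node, node_less);	/* the tree takes the reference */
	bpf_spin_unlock(&demo_lock);
	return 0;
}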
3 changes: 3 additions & 0 deletions tools/sched_ext/include/scx/common.bpf.h
@@ -224,6 +224,9 @@ int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node,

struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) __ksym;

extern void *bpf_refcount_acquire_impl(void *kptr, void *meta) __ksym;
#define bpf_refcount_acquire(kptr) bpf_refcount_acquire_impl(kptr, NULL)

/* task */
struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
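As a usage note on the kfunc declared above: bpf_refcount_acquire() hands back a new owned reference to a refcounted object, meaning one allocated with bpf_obj_new() whose struct embeds struct bpf_refcount, and returns NULL once the refcount has already dropped to zero. The caller must either pass that reference on to a collection (for example via bpf_rbtree_add()) or release it. A minimal hypothetical call site, where node_data and stashed are placeholders:

	struct node_data *n;

	n = bpf_refcount_acquire(stashed);	/* new owned reference, or NULL */
	if (n)
		bpf_obj_drop(n);		/* drop it if it is not handed to a tree or list */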
26 changes: 8 additions & 18 deletions tools/sched_ext/scx_flatcg.bpf.c
@@ -99,6 +99,7 @@ struct cgv_node {
struct bpf_rb_node rb_node;
__u64 cvtime;
__u64 cgid;
struct bpf_refcount refcount;
};

private(CGV_TREE) struct bpf_spin_lock cgv_tree_lock;
@@ -288,14 +289,17 @@ static void cgrp_enqueued(struct cgroup *cgrp, struct fcg_cgrp_ctx *cgc)
}

stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
if (!stash) {
if (!stash || !stash->node) {
scx_bpf_error("cgv_node lookup failed for cgid %llu", cgid);
return;
}

/* NULL if the node is already on the rbtree */
cgv_node = bpf_kptr_xchg(&stash->node, NULL);
cgv_node = bpf_refcount_acquire(stash->node);
if (!cgv_node) {
/*
* Node never leaves cgv_node_stash, this should only happen if
* fcg_cgroup_exit deletes the stashed node
*/
stat_inc(FCG_STAT_ENQ_RACE);
return;
}
@@ -608,7 +612,6 @@ void BPF_STRUCT_OPS(fcg_cgroup_set_weight, struct cgroup *cgrp, u32 weight)
static bool try_pick_next_cgroup(u64 *cgidp)
{
struct bpf_rb_node *rb_node;
struct cgv_node_stash *stash;
struct cgv_node *cgv_node;
struct fcg_cgrp_ctx *cgc;
struct cgroup *cgrp;
@@ -692,12 +695,6 @@ static bool try_pick_next_cgroup(u64 *cgidp)
return true;

out_stash:
stash = bpf_map_lookup_elem(&cgv_node_stash, &cgid);
if (!stash) {
stat_inc(FCG_STAT_PNC_GONE);
goto out_free;
}

/*
* Paired with cmpxchg in cgrp_enqueued(). If they see the following
* transition, they'll enqueue the cgroup. If they are earlier, we'll
@@ -710,16 +707,9 @@ static bool try_pick_next_cgroup(u64 *cgidp)
bpf_rbtree_add(&cgv_tree, &cgv_node->rb_node, cgv_node_less);
bpf_spin_unlock(&cgv_tree_lock);
stat_inc(FCG_STAT_PNC_RACE);
} else {
cgv_node = bpf_kptr_xchg(&stash->node, cgv_node);
if (cgv_node) {
scx_bpf_error("unexpected !NULL cgv_node stash");
goto out_free;
}
return false;
}

return false;

out_free:
bpf_obj_drop(cgv_node);
return false;
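The NULL check on the acquire side (the FCG_STAT_ENQ_RACE path above) can only trip when the stashed node is being torn down, as the new comment notes. A hypothetical teardown path, reusing the placeholder names from the sketch near the top of this page (stash_map is not a name in this repository):

/*
 * Deleting the stash element releases the stash's reference to the node; once
 * any reference held by the rbtree is gone too, the object is freed, and a
 * concurrent bpf_refcount_acquire() observes that as a NULL return.
 */
static void destroy_node(__u64 key)
{
	bpf_map_delete_elem(&stash_map, &key);
}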
