This repository has been archived by the owner on Jun 18, 2024. It is now read-only.

tools/sched_ext: Sync from scx repo (d7677e3e5cc5f556af6ef236989010577fa17dad)
htejun committed Jun 15, 2024
1 parent caa7501 commit c1ff517
Showing 4 changed files with 89 additions and 26 deletions.
86 changes: 74 additions & 12 deletions tools/sched_ext/include/scx/common.bpf.h
@@ -243,11 +243,69 @@ BPF_PROG(name, ##args)
#define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))

/* useful compiler attributes */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((__unused__))

/*
* bpf_log2 - Compute the base 2 logarithm of a 32-bit exponential value.
* READ/WRITE_ONCE() are from kernel (include/asm-generic/rwonce.h). They
* prevent compiler from caching, redoing or reordering reads or writes.
*/
typedef __u8 __attribute__((__may_alias__)) __u8_alias_t;
typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
        case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
        case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
        case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
        default:
                barrier();
                __builtin_memcpy((void *)res, (const void *)p, size);
                barrier();
        }
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
        switch (size) {
        case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
        case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
        case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
        case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
        default:
                barrier();
                __builtin_memcpy((void *)p, (const void *)res, size);
                barrier();
        }
}

#define READ_ONCE(x) \
({ \
        union { typeof(x) __val; char __c[1]; } __u = \
                { .__c = { 0 } }; \
        __read_once_size(&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})

#define WRITE_ONCE(x, val) \
({ \
        union { typeof(x) __val; char __c[1]; } __u = \
                { .__val = (val) }; \
        __write_once_size(&(x), __u.__c, sizeof(x)); \
        __u.__val; \
})
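
/*
 * Illustrative sketch, not part of this commit: READ_ONCE()/WRITE_ONCE() are
 * meant for fields that are read and written concurrently (across CPUs or
 * between BPF programs) without a lock. The global below is hypothetical.
 */
static u64 example_seq; /* hypothetical shared counter */

static __always_inline void example_rw_once(void)
{
        /* load the value exactly once, without the compiler caching or tearing it */
        u64 seq = READ_ONCE(example_seq);

        /* publish the update as a single store */
        WRITE_ONCE(example_seq, seq + 1);
}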

/*
* log2_u32 - Compute the base 2 logarithm of a 32-bit exponential value.
* @v: The value for which we're computing the base 2 logarithm.
*/
static inline u32 bpf_log2(u32 v)
static inline u32 log2_u32(u32 v)
{
        u32 r;
        u32 shift;
@@ -261,24 +319,18 @@ static inline u32 bpf_log2(u32 v)
}

/*
* bpf_log2l - Compute the base 2 logarithm of a 64-bit exponential value.
* log2_u64 - Compute the base 2 logarithm of a 64-bit exponential value.
* @v: The value for which we're computing the base 2 logarithm.
*/
static inline u32 bpf_log2l(u64 v)
static inline u32 log2_u64(u64 v)
{
        u32 hi = v >> 32;
        if (hi)
                return bpf_log2(hi) + 32 + 1;
                return log2_u32(hi) + 32 + 1;
        else
                return bpf_log2(v) + 1;
                return log2_u32(v) + 1;
}
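
/*
 * Illustrative sketch, not part of this commit: a typical use of the renamed
 * helper is bucketing a 64-bit duration into a power-of-two histogram slot.
 * The array and function names below are hypothetical.
 */
#define EXAMPLE_NR_BUCKETS 64
static u64 example_hist[EXAMPLE_NR_BUCKETS];

static __always_inline void example_account(u64 runtime_ns)
{
        u32 idx = log2_u64(runtime_ns);

        /* bounds check keeps the verifier happy and guards small inputs */
        if (idx < EXAMPLE_NR_BUCKETS)
                example_hist[idx]++;
}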

/* useful compiler attributes */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define __maybe_unused __attribute__((__unused__))


void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;

@@ -311,6 +363,16 @@ struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) __ksym;
void bpf_cgroup_release(struct cgroup *cgrp) __ksym;
struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;

/* css iteration */
struct bpf_iter_css;
struct cgroup_subsys_state;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
                            struct cgroup_subsys_state *start,
                            unsigned int flags) __weak __ksym;
extern struct cgroup_subsys_state *
bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym;
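
/*
 * Illustrative sketch, not part of this commit: the raw form of walking a
 * cgroup subtree with the iterator kfuncs declared above (schedulers usually
 * go through the bpf_for_each() wrapper instead). 'cgrp' is assumed to be a
 * trusted cgroup pointer; depending on program type the walk may also need
 * to be wrapped in bpf_rcu_read_lock()/bpf_rcu_read_unlock().
 */
static __always_inline u32 example_count_descendants(struct cgroup *cgrp)
{
        struct bpf_iter_css it;
        struct cgroup_subsys_state *css;
        u32 nr = 0;

        bpf_iter_css_new(&it, &cgrp->self, BPF_CGROUP_ITER_DESCENDANTS_PRE);
        while ((css = bpf_iter_css_next(&it)))
                nr++;
        bpf_iter_css_destroy(&it);

        return nr;
}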

/* cpumask */
struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym;
17 changes: 9 additions & 8 deletions tools/sched_ext/include/scx/common.h
@@ -44,7 +44,8 @@ typedef int64_t s64;

/**
* RESIZE_ARRAY - Convenience macro for resizing a BPF array
* @elfsec: the data section of the BPF program in which to the array exists
* @__skel: the skeleton containing the array
* @elfsec: the data section of the BPF program in which the array exists
* @arr: the name of the array
* @n: the desired array element count
*
@@ -56,13 +57,13 @@ typedef int64_t s64;
* for that custom data section so that it points to the newly memory mapped
* region.
*/
#define RESIZE_ARRAY(elfsec, arr, n) \
        do { \
                size_t __sz; \
                bpf_map__set_value_size(skel->maps.elfsec##_##arr, \
                                        sizeof(skel->elfsec##_##arr->arr[0]) * (n)); \
                skel->elfsec##_##arr = \
                        bpf_map__initial_value(skel->maps.elfsec##_##arr, &__sz); \
#define RESIZE_ARRAY(__skel, elfsec, arr, n) \
        do { \
                size_t __sz; \
                bpf_map__set_value_size((__skel)->maps.elfsec##_##arr, \
                                        sizeof((__skel)->elfsec##_##arr->arr[0]) * (n)); \
                (__skel)->elfsec##_##arr = \
                        bpf_map__initial_value((__skel)->maps.elfsec##_##arr, &__sz); \
        } while (0)
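
/*
 * Illustrative usage, not part of this commit: with the added __skel
 * parameter the skeleton is now passed explicitly, e.g.
 *
 *        RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
 *
 * The scx_central.c hunk below updates the in-tree call sites this way.
 */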

#include "user_exit_info.h"
8 changes: 4 additions & 4 deletions tools/sched_ext/include/scx/user_exit_info.h
@@ -53,10 +53,10 @@ struct user_exit_info {
#include <stdbool.h>

/* no need to call the following explicitly if SCX_OPS_LOAD() is used */
#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \
        u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \
        (__skel)->rodata->__uei_name##_dump_len = __len; \
        RESIZE_ARRAY(data, __uei_name##_dump, __len); \
#define UEI_SET_SIZE(__skel, __ops_name, __uei_name) ({ \
        u32 __len = (__skel)->struct_ops.__ops_name->exit_dump_len ?: UEI_DUMP_DFL_LEN; \
        (__skel)->rodata->__uei_name##_dump_len = __len; \
        RESIZE_ARRAY((__skel), data, __uei_name##_dump, __len); \
})

#define UEI_EXITED(__skel, __uei_name) ({ \
4 changes: 2 additions & 2 deletions tools/sched_ext/scx_central.c
@@ -77,8 +77,8 @@ int main(int argc, char **argv)
}

        /* Resize arrays so their element count is equal to cpu count. */
        RESIZE_ARRAY(data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
        RESIZE_ARRAY(data, cpu_started_at, skel->rodata->nr_cpu_ids);
        RESIZE_ARRAY(skel, data, cpu_gimme_task, skel->rodata->nr_cpu_ids);
        RESIZE_ARRAY(skel, data, cpu_started_at, skel->rodata->nr_cpu_ids);

        SCX_OPS_LOAD(skel, central_ops, scx_central, uei);

