From 2e5e2537b71d5a2e3b7191205cb2359b4e6adf37 Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Wed, 4 Sep 2024 20:27:30 +0800 Subject: [PATCH 01/10] feat: add ebpf support add basic bpf map type. add basic perf type. update kprobe impl. add user app example. --- .github/workflows/cache-toolchain.yml | 2 +- kernel/Cargo.toml | 2 + kernel/crates/kprobe/src/arch/mod.rs | 164 +- kernel/crates/rbpf/src/disassembler.rs | 2 +- kernel/crates/rbpf/src/interpreter.rs | 1 + kernel/crates/rbpf/src/lib.rs | 103 +- kernel/src/arch/riscv64/kprobe.rs | 76 + kernel/src/arch/x86_64/kprobe.rs | 54 + kernel/src/bpf/helper/mod.rs | 162 ++ kernel/src/bpf/helper/print.rs | 43 + kernel/src/bpf/map/array_map.rs | 271 ++ kernel/src/bpf/map/hash_map.rs | 85 + kernel/src/bpf/map/mod.rs | 313 +++ kernel/src/bpf/map/util.rs | 86 + kernel/src/bpf/mod.rs | 45 + kernel/src/bpf/prog/mod.rs | 104 + kernel/src/bpf/prog/util.rs | 112 + kernel/src/bpf/prog/verifier.rs | 125 + kernel/src/debug/kprobe/args.rs | 29 +- kernel/src/debug/kprobe/mod.rs | 133 +- kernel/src/debug/kprobe/test.rs | 7 +- kernel/src/exception/debug.rs | 10 +- kernel/src/exception/ebreak.rs | 9 +- kernel/src/filesystem/vfs/file.rs | 9 +- kernel/src/filesystem/vfs/mod.rs | 3 + kernel/src/include/bindings/linux_bpf.rs | 2430 +++++++++++++++++ kernel/src/include/bindings/mod.rs | 10 +- kernel/src/init/init.rs | 2 +- kernel/src/lib.rs | 4 +- kernel/src/mm/syscall.rs | 66 +- kernel/src/mm/ucontext.rs | 18 + kernel/src/perf/bpf.rs | 294 ++ kernel/src/perf/kprobe.rs | 110 + kernel/src/perf/mod.rs | 290 ++ kernel/src/perf/util.rs | 71 + kernel/src/syscall/mod.rs | 14 + kernel/src/time/syscall.rs | 4 +- user/apps/syscall_ebpf/.cargo/config.toml | 2 + user/apps/syscall_ebpf/.dir-locals.el | 1 + user/apps/syscall_ebpf/.gitignore | 9 + user/apps/syscall_ebpf/.vim/coc-settings.json | 3 + user/apps/syscall_ebpf/.vscode/settings.json | 3 + user/apps/syscall_ebpf/Cargo.toml | 3 + user/apps/syscall_ebpf/README.md | 32 + .../syscall_ebpf-common/Cargo.toml | 14 + .../syscall_ebpf-common/src/lib.rs | 1 + .../syscall_ebpf-ebpf/.cargo/config.toml | 6 + .../syscall_ebpf-ebpf/.helix/config.toml | 2 + .../syscall_ebpf-ebpf/.vim/coc-settings.json | 4 + .../syscall_ebpf-ebpf/.vscode/settings.json | 4 + .../syscall_ebpf/syscall_ebpf-ebpf/Cargo.toml | 33 + .../syscall_ebpf-ebpf/rust-toolchain.toml | 13 + .../syscall_ebpf-ebpf/src/main.rs | 44 + .../apps/syscall_ebpf/syscall_ebpf/Cargo.toml | 19 + .../syscall_ebpf/syscall_ebpf/src/main.rs | 47 + user/apps/syscall_ebpf/xtask/Cargo.toml | 8 + user/apps/syscall_ebpf/xtask/src/build.rs | 42 + .../apps/syscall_ebpf/xtask/src/build_ebpf.rs | 67 + user/apps/syscall_ebpf/xtask/src/main.rs | 36 + user/apps/syscall_ebpf/xtask/src/run.rs | 55 + user/apps/test_ebpf/.gitignore | 3 + user/apps/test_ebpf/Cargo.toml | 16 + user/apps/test_ebpf/Makefile | 61 + user/apps/test_ebpf/src/main.rs | 60 + user/dadk/config/test_ebpf_0_1_0.dadk | 23 + 65 files changed, 5695 insertions(+), 179 deletions(-) create mode 100644 kernel/src/bpf/helper/mod.rs create mode 100644 kernel/src/bpf/helper/print.rs create mode 100644 kernel/src/bpf/map/array_map.rs create mode 100644 kernel/src/bpf/map/hash_map.rs create mode 100644 kernel/src/bpf/map/mod.rs create mode 100644 kernel/src/bpf/map/util.rs create mode 100644 kernel/src/bpf/mod.rs create mode 100644 kernel/src/bpf/prog/mod.rs create mode 100644 kernel/src/bpf/prog/util.rs create mode 100644 kernel/src/bpf/prog/verifier.rs create mode 100644 kernel/src/include/bindings/linux_bpf.rs create mode 
100644 kernel/src/perf/bpf.rs create mode 100644 kernel/src/perf/kprobe.rs create mode 100644 kernel/src/perf/mod.rs create mode 100644 kernel/src/perf/util.rs create mode 100644 user/apps/syscall_ebpf/.cargo/config.toml create mode 100644 user/apps/syscall_ebpf/.dir-locals.el create mode 100644 user/apps/syscall_ebpf/.gitignore create mode 100644 user/apps/syscall_ebpf/.vim/coc-settings.json create mode 100644 user/apps/syscall_ebpf/.vscode/settings.json create mode 100644 user/apps/syscall_ebpf/Cargo.toml create mode 100644 user/apps/syscall_ebpf/README.md create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-common/src/lib.rs create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/.cargo/config.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/.helix/config.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vim/coc-settings.json create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vscode/settings.json create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/Cargo.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf-ebpf/src/main.rs create mode 100644 user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml create mode 100644 user/apps/syscall_ebpf/syscall_ebpf/src/main.rs create mode 100644 user/apps/syscall_ebpf/xtask/Cargo.toml create mode 100644 user/apps/syscall_ebpf/xtask/src/build.rs create mode 100644 user/apps/syscall_ebpf/xtask/src/build_ebpf.rs create mode 100644 user/apps/syscall_ebpf/xtask/src/main.rs create mode 100644 user/apps/syscall_ebpf/xtask/src/run.rs create mode 100644 user/apps/test_ebpf/.gitignore create mode 100644 user/apps/test_ebpf/Cargo.toml create mode 100644 user/apps/test_ebpf/Makefile create mode 100644 user/apps/test_ebpf/src/main.rs create mode 100644 user/dadk/config/test_ebpf_0_1_0.dadk diff --git a/.github/workflows/cache-toolchain.yml b/.github/workflows/cache-toolchain.yml index 94519c834..4b6099c54 100644 --- a/.github/workflows/cache-toolchain.yml +++ b/.github/workflows/cache-toolchain.yml @@ -89,6 +89,6 @@ jobs: rustup target add x86_64-unknown-linux-musl --toolchain nightly-2024-07-23-x86_64-unknown-linux-gnu rustup component add rust-src --toolchain nightly-2024-07-23-x86_64-unknown-linux-gnu - + cargo install bpf-linker diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 24129dc41..8b9ee80d0 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -60,6 +60,8 @@ paste = "=1.0.14" slabmalloc = { path = "crates/rust-slabmalloc" } log = "0.4.21" kprobe = { path = "crates/kprobe" } +rbpf = { path = "crates/rbpf" , default-features = false } +printf-compat = { version = "0.1.1", default-features = false } # target为x86_64时,使用下面的依赖 [target.'cfg(target_arch = "x86_64")'.dependencies] diff --git a/kernel/crates/kprobe/src/arch/mod.rs b/kernel/crates/kprobe/src/arch/mod.rs index fe25d5d84..27abd5593 100644 --- a/kernel/crates/kprobe/src/arch/mod.rs +++ b/kernel/crates/kprobe/src/arch/mod.rs @@ -1,7 +1,6 @@ -use alloc::collections::BTreeMap; +use alloc::boxed::Box; use alloc::string::String; use alloc::sync::Arc; -use alloc::vec::Vec; use core::{any::Any, fmt::Debug}; #[cfg(target_arch = "loongarch64")] @@ -76,7 +75,13 @@ pub struct KprobeBuilder { pre_handler: ProbeHandler, post_handler: ProbeHandler, fault_handler: Option, + event_callback: Option>, probe_point: Option>, + enable: bool, +} + +pub trait EventCallback: Send { + fn call(&self, trap_frame: 
&dyn ProbeArgs); } impl KprobeBuilder { @@ -86,6 +91,7 @@ impl KprobeBuilder { offset: usize, pre_handler: fn(&dyn ProbeArgs), post_handler: fn(&dyn ProbeArgs), + enable: bool, ) -> Self { KprobeBuilder { symbol, @@ -93,8 +99,10 @@ impl KprobeBuilder { offset, pre_handler: ProbeHandler::new(pre_handler), post_handler: ProbeHandler::new(post_handler), + event_callback: None, fault_handler: None, probe_point: None, + enable, } } @@ -108,6 +116,11 @@ impl KprobeBuilder { self } + pub fn with_event_callback(mut self, event_callback: Box) -> Self { + self.event_callback = Some(event_callback); + self + } + /// 获取探测点的地址 /// /// 探测点的地址 == break指令的地址 @@ -123,6 +136,12 @@ pub struct KprobeBasic { pre_handler: ProbeHandler, post_handler: ProbeHandler, fault_handler: ProbeHandler, + event_callback: Option>, + enable: bool, +} + +pub trait CallBackFunc: Send + Sync { + fn call(&self, trap_frame: &dyn ProbeArgs); } impl Debug for KprobeBasic { @@ -148,6 +167,27 @@ impl KprobeBasic { self.fault_handler.call(trap_frame); } + pub fn call_event_callback(&self, trap_frame: &dyn ProbeArgs) { + if let Some(ref call_back) = self.event_callback { + call_back.call(trap_frame); + } + } + + pub fn update_event_callback(&mut self, callback: Box) { + self.event_callback = Some(callback); + } + + pub fn disable(&mut self) { + self.enable = false; + } + + pub fn enable(&mut self) { + self.enable = true; + } + + pub fn is_enabled(&self) -> bool { + self.enable + } /// 返回探测点的函数名称 pub fn symbol(&self) -> Option<&str> { self.symbol.as_deref() @@ -163,125 +203,9 @@ impl From for KprobeBasic { offset: value.offset, pre_handler: value.pre_handler, post_handler: value.post_handler, + event_callback: value.event_callback, fault_handler, - } - } -} - -/// 管理所有的kprobe探测点 -#[derive(Debug, Default)] -pub struct KprobeManager { - break_list: BTreeMap>>, - debug_list: BTreeMap>>, -} - -impl KprobeManager { - pub const fn new() -> Self { - KprobeManager { - break_list: BTreeMap::new(), - debug_list: BTreeMap::new(), - } - } - /// # 插入一个kprobe - /// - /// ## 参数 - /// - `kprobe`: kprobe的实例 - pub fn insert_kprobe(&mut self, kprobe: Arc) { - let probe_point = kprobe.probe_point(); - self.insert_break_point(probe_point.break_address(), kprobe.clone()); - self.insert_debug_point(probe_point.debug_address(), kprobe); - } - - /// # 向break_list中插入一个kprobe - /// - /// ## 参数 - /// - `address`: kprobe的地址, 由`KprobePoint::break_address()`或者`KprobeBuilder::probe_addr()`返回 - /// - `kprobe`: kprobe的实例 - fn insert_break_point(&mut self, address: usize, kprobe: Arc) { - let list = self.break_list.entry(address).or_default(); - list.push(kprobe); - } - - /// # 向debug_list中插入一个kprobe - /// - /// ## 参数 - /// - `address`: kprobe的单步执行地址,由`KprobePoint::debug_address()`返回 - /// - `kprobe`: kprobe的实例 - fn insert_debug_point(&mut self, address: usize, kprobe: Arc) { - let list = self.debug_list.entry(address).or_default(); - list.push(kprobe); - } - - pub fn get_break_list(&self, address: usize) -> Option<&Vec>> { - self.break_list.get(&address) - } - - pub fn get_debug_list(&self, address: usize) -> Option<&Vec>> { - self.debug_list.get(&address) - } - - /// # 返回一个地址上注册的kprobe数量 - /// - /// ## 参数 - /// - `address`: kprobe的地址, 由`KprobePoint::break_address()`或者`KprobeBuilder::probe_addr()`返回 - pub fn kprobe_num(&self, address: usize) -> usize { - self.break_list_len(address) - } - - #[inline] - fn break_list_len(&self, address: usize) -> usize { - self.break_list - .get(&address) - .map(|list| list.len()) - .unwrap_or(0) - } - #[inline] - fn 
debug_list_len(&self, address: usize) -> usize { - self.debug_list - .get(&address) - .map(|list| list.len()) - .unwrap_or(0) - } - - /// # 移除一个kprobe - /// - /// ## 参数 - /// - `kprobe`: kprobe的实例 - pub fn remove_kprobe(&mut self, kprobe: &Arc) { - let probe_point = kprobe.probe_point(); - self.remove_one_break(probe_point.break_address(), kprobe); - self.remove_one_debug(probe_point.debug_address(), kprobe); - } - - /// # 从break_list中移除一个kprobe - /// - /// 如果没有其他kprobe注册在这个地址上,则删除列表 - /// - /// ## 参数 - /// - `address`: kprobe的地址, 由`KprobePoint::break_address()`或者`KprobeBuilder::probe_addr()`返回 - /// - `kprobe`: kprobe的实例 - fn remove_one_break(&mut self, address: usize, kprobe: &Arc) { - if let Some(list) = self.break_list.get_mut(&address) { - list.retain(|x| !Arc::ptr_eq(x, kprobe)); - } - if self.break_list_len(address) == 0 { - self.break_list.remove(&address); - } - } - - /// # 从debug_list中移除一个kprobe - /// - /// 如果没有其他kprobe注册在这个地址上,则删除列表 - /// - /// ## 参数 - /// - `address`: kprobe的单步执行地址,由`KprobePoint::debug_address()`返回 - /// - `kprobe`: kprobe的实例 - fn remove_one_debug(&mut self, address: usize, kprobe: &Arc) { - if let Some(list) = self.debug_list.get_mut(&address) { - list.retain(|x| !Arc::ptr_eq(x, kprobe)); - } - if self.debug_list_len(address) == 0 { - self.debug_list.remove(&address); + enable: value.enable, } } } diff --git a/kernel/crates/rbpf/src/disassembler.rs b/kernel/crates/rbpf/src/disassembler.rs index 8853a2937..6d6572c5c 100644 --- a/kernel/crates/rbpf/src/disassembler.rs +++ b/kernel/crates/rbpf/src/disassembler.rs @@ -801,7 +801,7 @@ pub fn disassemble(prog: &[u8]) { #[cfg(not(feature = "std"))] { for insn in to_insn_vec(prog) { - info!("{}", insn.desc); + log::info!("{}", insn.desc); } } } diff --git a/kernel/crates/rbpf/src/interpreter.rs b/kernel/crates/rbpf/src/interpreter.rs index 34e64ab37..cb4bddf3c 100644 --- a/kernel/crates/rbpf/src/interpreter.rs +++ b/kernel/crates/rbpf/src/interpreter.rs @@ -13,6 +13,7 @@ use crate::{ }; #[cfg(not(feature = "user"))] +#[allow(unused)] fn check_mem( addr: u64, len: usize, diff --git a/kernel/crates/rbpf/src/lib.rs b/kernel/crates/rbpf/src/lib.rs index 7669eac0d..449268425 100644 --- a/kernel/crates/rbpf/src/lib.rs +++ b/kernel/crates/rbpf/src/lib.rs @@ -29,9 +29,7 @@ // Configures the crate to be `no_std` when `std` feature is disabled. #![cfg_attr(not(feature = "std"), no_std)] extern crate alloc; - use alloc::{collections::BTreeMap, format, vec, vec::Vec}; -use core::ptr; use byteorder::{ByteOrder, LittleEndian}; @@ -509,7 +507,7 @@ impl<'a> EbpfVmMbuff<'a> { // in the kernel; anyway the verifier would prevent the use of uninitialized registers). // See `mul_loop` test. let mem_ptr = match mem.len() { - 0 => ptr::null_mut(), + 0 => core::ptr::null_mut(), _ => mem.as_ptr() as *mut u8, }; @@ -908,7 +906,7 @@ impl<'a> EbpfVmFixedMbuff<'a> { // in the kernel; anyway the verifier would prevent the use of uninitialized registers). // See `mul_loop` test. let mem_ptr = match mem.len() { - 0 => ptr::null_mut(), + 0 => core::ptr::null_mut(), _ => mem.as_ptr() as *mut u8, }; @@ -1010,7 +1008,7 @@ impl<'a> EbpfVmFixedMbuff<'a> { // in the kernel; anyway the verifier would prevent the use of uninitialized registers). // See `mul_loop` test. 
let mem_ptr = match mem.len() { - 0 => ptr::null_mut(), + 0 => core::ptr::null_mut(), _ => mem.as_ptr() as *mut u8, }; @@ -1689,3 +1687,98 @@ impl<'a> EbpfVmNoData<'a> { self.parent.execute_program_cranelift(&mut []) } } + +/// EbpfVm with Owned data +pub struct EbpfVmRawOwned { + parent: EbpfVmRaw<'static>, + data_len: usize, + data_cap: usize, +} + +impl EbpfVmRawOwned { + /// Create a new virtual machine instance, and load an eBPF program into that instance. + /// When attempting to load the program, it passes through a simple verifier. + pub fn new(prog: Option>) -> Result { + let (prog, data_len, data_cap) = match prog { + Some(prog) => { + let data_len = prog.len(); + let data_cap = prog.capacity(); + let slice = prog.leak(); + let slice = unsafe { core::slice::from_raw_parts(slice.as_ptr(), data_len) }; + (Some(slice), data_len, data_cap) + } + None => (None, 0, 0), + }; + let parent = EbpfVmRaw::new(prog)?; + Ok(Self { + parent, + data_len, + data_cap, + }) + } + /// Load a new eBPF program into the virtual machine instance + pub fn set_program(&mut self, prog: Vec) -> Result<(), Error> { + self.data_len = prog.len(); + self.data_cap = prog.capacity(); + let slice = prog.leak(); + self.parent.set_program(slice)?; + Ok(()) + } + + /// Set a new verifier function. The function should return an Error if the program should be rejected by the virtual machine. + /// If a program has been loaded to the VM already, the verifier is immediately run. + pub fn set_verifier(&mut self, verifier: Verifier) -> Result<(), Error> { + self.parent.set_verifier(verifier) + } + + /// Register a built-in or user-defined helper function in order to use it later from within the eBPF program. + /// The helper is registered into a hashmap, so the key can be any u32. + /// If using JIT-compiled eBPF programs, be sure to register all helpers before compiling the program. + /// You should be able to change registered helpers after compiling, but not to add new ones (i. e. with new keys). + pub fn register_helper( + &mut self, + key: u32, + function: fn(u64, u64, u64, u64, u64) -> u64, + ) -> Result<(), Error> { + self.parent.register_helper(key, function) + } + + /// Register a set of built-in or user-defined helper functions in order to use them later from + /// within the eBPF program. The helpers are registered into a hashmap, so the `key` can be any + /// `u32`. + #[allow(clippy::type_complexity)] + pub fn register_helper_set( + &mut self, + helpers: &HashMap u64>, + ) -> Result<(), Error> { + for (key, function) in helpers { + self.parent.register_helper(*key, *function)?; + } + Ok(()) + } + + /// Execute the previously JIT-compiled program, with the given packet data, in a manner very similar to execute_program(). + /// + /// Safety + /// + /// **WARNING:** JIT-compiled assembly code is not safe, in particular there is no runtime check for memory access; + /// so if the eBPF program attempts erroneous accesses, this may end very bad (program may segfault). + /// It may be wise to check that the program works with the interpreter before running the JIT-compiled version of it. + /// + /// For this reason the function should be called from within an unsafe bloc. 
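+ /// In this wrapper the program is run by the interpreter; the call simply forwards to `EbpfVmRaw::execute_program` on the owned program.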
+ pub fn execute_program(&self, mem: &mut [u8]) -> Result { + self.parent.execute_program(mem) + } +} + +impl Drop for EbpfVmRawOwned { + fn drop(&mut self) { + match self.parent.parent.prog { + Some(prog) => unsafe { + let ptr = prog.as_ptr(); + let _prog = Vec::from_raw_parts(ptr as *mut u8, self.data_len, self.data_cap); + }, + None => {} + }; + } +} diff --git a/kernel/src/arch/riscv64/kprobe.rs b/kernel/src/arch/riscv64/kprobe.rs index bf02cc109..960b06cd6 100644 --- a/kernel/src/arch/riscv64/kprobe.rs +++ b/kernel/src/arch/riscv64/kprobe.rs @@ -7,3 +7,79 @@ pub fn setup_single_step(frame: &mut TrapFrame, step_addr: usize) { pub fn clear_single_step(frame: &mut TrapFrame, return_addr: usize) { frame.set_pc(return_addr); } + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct KProbeContext { + pub pc: usize, + pub ra: usize, + pub sp: usize, + pub gp: usize, + pub tp: usize, + pub t0: usize, + pub t1: usize, + pub t2: usize, + pub s0: usize, + pub s1: usize, + pub a0: usize, + pub a1: usize, + pub a2: usize, + pub a3: usize, + pub a4: usize, + pub a5: usize, + pub a6: usize, + pub a7: usize, + pub s2: usize, + pub s3: usize, + pub s4: usize, + pub s5: usize, + pub s6: usize, + pub s7: usize, + pub s8: usize, + pub s9: usize, + pub s10: usize, + pub s11: usize, + pub t3: usize, + pub t4: usize, + pub t5: usize, + pub t6: usize, +} + +impl From<&TrapFrame> for KProbeContext { + fn from(trap_frame: &TrapFrame) -> Self { + Self { + pc: trap_frame.epc, + ra: trap_frame.ra, + sp: trap_frame.sp, + gp: trap_frame.gp, + tp: trap_frame.tp, + t0: trap_frame.t0, + t1: trap_frame.t1, + t2: trap_frame.t2, + s0: trap_frame.s0, + s1: trap_frame.s1, + a0: trap_frame.a0, + a1: trap_frame.a1, + a2: trap_frame.a2, + a3: trap_frame.a3, + a4: trap_frame.a4, + a5: trap_frame.a5, + a6: trap_frame.a6, + a7: trap_frame.a7, + s2: trap_frame.s2, + s3: trap_frame.s3, + s4: trap_frame.s4, + s5: trap_frame.s5, + s6: trap_frame.s6, + s7: trap_frame.s7, + s8: trap_frame.s8, + s9: trap_frame.s9, + s10: trap_frame.s10, + s11: trap_frame.s11, + t3: trap_frame.t3, + t4: trap_frame.t4, + t5: trap_frame.t5, + t6: trap_frame.t6, + } + } +} diff --git a/kernel/src/arch/x86_64/kprobe.rs b/kernel/src/arch/x86_64/kprobe.rs index 5e31be801..e998aa993 100644 --- a/kernel/src/arch/x86_64/kprobe.rs +++ b/kernel/src/arch/x86_64/kprobe.rs @@ -9,3 +9,57 @@ pub fn clear_single_step(frame: &mut TrapFrame, return_addr: usize) { frame.rflags &= !0x100; frame.set_pc(return_addr); } + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct KProbeContext { + pub r15: ::core::ffi::c_ulong, + pub r14: ::core::ffi::c_ulong, + pub r13: ::core::ffi::c_ulong, + pub r12: ::core::ffi::c_ulong, + pub rbp: ::core::ffi::c_ulong, + pub rbx: ::core::ffi::c_ulong, + pub r11: ::core::ffi::c_ulong, + pub r10: ::core::ffi::c_ulong, + pub r9: ::core::ffi::c_ulong, + pub r8: ::core::ffi::c_ulong, + pub rax: ::core::ffi::c_ulong, + pub rcx: ::core::ffi::c_ulong, + pub rdx: ::core::ffi::c_ulong, + pub rsi: ::core::ffi::c_ulong, + pub rdi: ::core::ffi::c_ulong, + pub orig_rax: ::core::ffi::c_ulong, + pub rip: ::core::ffi::c_ulong, + pub cs: ::core::ffi::c_ulong, + pub eflags: ::core::ffi::c_ulong, + pub rsp: ::core::ffi::c_ulong, + pub ss: ::core::ffi::c_ulong, +} + +impl From<&TrapFrame> for KProbeContext { + fn from(trap_frame: &TrapFrame) -> Self { + Self { + r15: trap_frame.r15, + r14: trap_frame.r14, + r13: trap_frame.r13, + r12: trap_frame.r12, + rbp: trap_frame.rbp, + rbx: trap_frame.rbx, + r11: trap_frame.r11, + r10: trap_frame.r10, + r9: trap_frame.r9, 
+ r8: trap_frame.r8, + rax: trap_frame.rax, + rcx: trap_frame.rcx, + rdx: trap_frame.rdx, + rsi: trap_frame.rsi, + rdi: trap_frame.rdi, + orig_rax: 0, + rip: trap_frame.rip, + cs: trap_frame.cs, + eflags: trap_frame.rflags, + rsp: trap_frame.rsp, + ss: trap_frame.ss, + } + } +} diff --git a/kernel/src/bpf/helper/mod.rs b/kernel/src/bpf/helper/mod.rs new file mode 100644 index 000000000..fcf2732e5 --- /dev/null +++ b/kernel/src/bpf/helper/mod.rs @@ -0,0 +1,162 @@ +mod print; +use crate::bpf::helper::print::trace_printf; +use crate::bpf::map::BpfMap; +use crate::bpf::map::{PerCpuInfo, PerCpuInfoImpl}; +use crate::include::bindings::linux_bpf::BPF_F_CURRENT_CPU; +use crate::libs::lazy_init::Lazy; +use alloc::{collections::BTreeMap, sync::Arc}; +use core::ffi::c_void; +use system_error::SystemError; + +type RawBPFHelperFn = fn(u64, u64, u64, u64, u64) -> u64; +type Result = core::result::Result; +macro_rules! define_func { + ($name:ident) => { + core::mem::transmute::($name as usize) + }; +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_elem/ +pub unsafe fn raw_map_lookup_elem(map: *mut c_void, key: *const c_void) -> *const c_void { + let map = Arc::from_raw(map as *const BpfMap); + let key_size = map.key_size(); + let key = core::slice::from_raw_parts(key as *const u8, key_size); + let value = map_lookup_elem(&map, key); + // log::info!(": {:x?}", value); + // warning: We need to keep the map alive, so we don't drop it here. + let _ = Arc::into_raw(map); + match value { + Ok(Some(value)) => value as *const c_void, + _ => core::ptr::null_mut(), + } +} + +pub fn map_lookup_elem(map: &Arc, key: &[u8]) -> Result> { + let binding = map.inner_map().lock(); + // let key_value = u32::from_ne_bytes(key[0..4].try_into().unwrap()); + // log::info!(" key_value: {:?}", key_value); + let value = binding.lookup_elem(key); + match value { + Ok(Some(value)) => Ok(Some(value.as_ptr())), + _ => Ok(None), + } +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_perf_event_output/ +/// +/// See https://man7.org/linux/man-pages/man7/bpf-helpers.7.html +pub unsafe fn raw_perf_event_output( + ctx: *mut c_void, + map: *mut c_void, + flags: u64, + data: *mut c_void, + size: u64, +) -> i64 { + // log::info!(": {:x?}", data); + let map = Arc::from_raw(map as *const BpfMap); + let data = core::slice::from_raw_parts(data as *const u8, size as usize); + let res = perf_event_output(ctx, &map, flags, data); + // warning: We need to keep the map alive, so we don't drop it here. 
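+ // `Arc::into_raw` re-leaks the reference taken by `Arc::from_raw` above, keeping the map's refcount balanced.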
+ let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +pub fn perf_event_output( + ctx: *mut c_void, + map: &Arc, + flags: u64, + data: &[u8], +) -> Result<()> { + let binding = map.inner_map().lock(); + let index = flags as u32; + let flags = (flags >> 32) as u32; + let key = if index == BPF_F_CURRENT_CPU as u32 { + let cpu_id = PerCpuInfoImpl::cpu_id(); + cpu_id + } else { + index + }; + let fd = binding.lookup_elem(&key.to_ne_bytes()).unwrap().unwrap(); + let fd = u32::from_ne_bytes(fd.try_into().unwrap()); + // log::info!( + // ": flags: {:x?}, index: {:x?}, fd: {:x?}", + // flags, + // index, + // fd + // ); + crate::perf::perf_event_output(ctx, fd as usize, flags, data)?; + Ok(()) +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_probe_read/ +pub fn raw_bpf_probe_read(dst: *mut c_void, size: u32, unsafe_ptr: *const c_void) -> i64 { + log::info!( + "raw_bpf_probe_read, dst:{:x}, size:{}, unsafe_ptr: {:x}", + dst as usize, + size, + unsafe_ptr as usize + ); + let (dst, src) = unsafe { + let dst = core::slice::from_raw_parts_mut(dst as *mut u8, size as usize); + let src = core::slice::from_raw_parts(unsafe_ptr as *const u8, size as usize); + (dst, src) + }; + let res = bpf_probe_read(dst, src); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +/// For tracing programs, safely attempt to read size +/// bytes from kernel space address unsafe_ptr and +/// store the data in dst. +pub fn bpf_probe_read(dst: &mut [u8], src: &[u8]) -> Result<()> { + log::info!("bpf_probe_read: len: {}", dst.len()); + dst.copy_from_slice(src); + Ok(()) +} + +pub unsafe fn raw_map_update_elem( + map: *mut c_void, + key: *const c_void, + value: *const c_void, + flags: u64, +) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let key_size = map.key_size(); + let value_size = map.value_size(); + // log::info!(": flags: {:x?}", flags); + let key = core::slice::from_raw_parts(key as *const u8, key_size); + let value = core::slice::from_raw_parts(value as *const u8, value_size); + let res = map_update_elem(&map, key, value, flags); + let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as _, + } +} + +pub fn map_update_elem(map: &Arc, key: &[u8], value: &[u8], flags: u64) -> Result<()> { + let mut binding = map.inner_map().lock(); + let value = binding.update_elem(key, value, flags); + value +} + +pub static BPF_HELPER_FUN_SET: Lazy> = Lazy::new(); + +/// Initialize the helper functions. +pub fn init_helper_functions() { + let mut map = BTreeMap::new(); + unsafe { + map.insert(1, define_func!(raw_map_lookup_elem)); + map.insert(2, define_func!(raw_map_update_elem)); + map.insert(25, define_func!(raw_perf_event_output)); + map.insert(6, define_func!(trace_printf)); + map.insert(4, define_func!(raw_bpf_probe_read)); + } + BPF_HELPER_FUN_SET.init(map); +} diff --git a/kernel/src/bpf/helper/print.rs b/kernel/src/bpf/helper/print.rs new file mode 100644 index 000000000..f70880409 --- /dev/null +++ b/kernel/src/bpf/helper/print.rs @@ -0,0 +1,43 @@ +use core::{ + ffi::{c_char, c_int}, + fmt::Write, +}; + +use printf_compat::{format, output}; + +/// Printf according to the format string, function will return the number of bytes written(including '\0') +pub unsafe extern "C" fn printf(w: &mut impl Write, str: *const c_char, mut args: ...) 
-> c_int { + let bytes_written = format(str as _, args.as_va_list(), output::fmt_write(w)); + bytes_written + 1 +} + +/// Printf with '\n' at the end, function will return the number of bytes written(including '\n' and '\0') +pub unsafe extern "C" fn printf_with( + w: &mut impl Write, + str: *const c_char, + mut args: ... +) -> c_int { + let str = core::ffi::CStr::from_ptr(str).to_str().unwrap().as_bytes(); + let bytes_written = if str.ends_with(b"\n") { + format(str.as_ptr() as _, args.as_va_list(), output::fmt_write(w)) + } else { + let mut bytes_written = format(str.as_ptr() as _, args.as_va_list(), output::fmt_write(w)); + w.write_str("\n").unwrap(); + bytes_written += 1; + bytes_written + }; + bytes_written + 1 +} + +struct TerminalOut; +impl Write for TerminalOut { + fn write_str(&mut self, s: &str) -> core::fmt::Result { + print!("{}", s); + Ok(()) + } +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_trace_printk/ +pub fn trace_printf(fmt_ptr: u64, _fmt_len: u64, arg3: u64, arg4: u64, arg5: u64) -> u64 { + unsafe { printf_with(&mut TerminalOut, fmt_ptr as _, arg3, arg4, arg5) as u64 } +} diff --git a/kernel/src/bpf/map/array_map.rs b/kernel/src/bpf/map/array_map.rs new file mode 100644 index 000000000..14b747474 --- /dev/null +++ b/kernel/src/bpf/map/array_map.rs @@ -0,0 +1,271 @@ +//! BPF_MAP_TYPE_ARRAY and BPF_MAP_TYPE_PERCPU_ARRAY +//! +//! +//! See https://docs.kernel.org/bpf/map_array.html + +use super::super::Result; +use crate::bpf::map::util::round_up; +use crate::bpf::map::PerCpuInfo; +use crate::bpf::map::{BpfCallBackFn, BpfMapCommonOps, BpfMapMeta}; +use alloc::{vec, vec::Vec}; +use core::{ + fmt::{Debug, Formatter}, + ops::{Index, IndexMut}, +}; +use log::info; +use system_error::SystemError; + +#[derive(Debug)] +pub struct ArrayMap { + max_entries: u32, + data: ArrayMapData, +} + +struct ArrayMapData { + elem_size: u32, + data: Vec, +} + +impl Debug for ArrayMapData { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_struct("ArrayMapData") + .field("elem_size", &self.elem_size) + .field("data_len", &self.data.len()) + .finish() + } +} + +impl ArrayMapData { + pub fn new(elem_size: u32, max_entries: u32) -> Self { + debug_assert!(elem_size % 8 == 0); + let total_size = elem_size * max_entries; + let data = vec![0; total_size as usize]; + ArrayMapData { elem_size, data } + } +} + +impl Index for ArrayMapData { + type Output = [u8]; + fn index(&self, index: u32) -> &Self::Output { + let start = index * self.elem_size; + &self.data[start as usize..(start + self.elem_size) as usize] + } +} + +impl IndexMut for ArrayMapData { + fn index_mut(&mut self, index: u32) -> &mut Self::Output { + let start = index * self.elem_size; + &mut self.data[start as usize..(start + self.elem_size) as usize] + } +} + +impl TryFrom<&BpfMapMeta> for ArrayMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + if attr.value_size == 0 || attr.max_entries == 0 || attr.key_size != 4 { + return Err(SystemError::EINVAL); + } + let elem_size = round_up(attr.value_size as usize, 8); + let data = ArrayMapData::new(elem_size as u32, attr.max_entries); + Ok(ArrayMap { + max_entries: attr.max_entries, + data, + }) + } +} + +impl BpfMapCommonOps for ArrayMap { + fn lookup_elem(&self, key: &[u8]) -> Result> { + if key.len() != 4 { + return Err(SystemError::EINVAL); + } + let index = u32::from_ne_bytes(key.try_into().unwrap()); + if index >= self.max_entries { + return Err(SystemError::EINVAL); + } + let val = self.data.index(index); + 
Ok(Some(val)) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> Result<()> { + if key.len() != 4 { + return Err(SystemError::EINVAL); + } + let index = u32::from_ne_bytes(key.try_into().unwrap()); + if index >= self.max_entries { + return Err(SystemError::EINVAL); + } + if value.len() > self.data.elem_size as usize { + return Err(SystemError::EINVAL); + } + let old_value = self.data.index_mut(index); + old_value[..value.len()].copy_from_slice(value); + Ok(()) + } + fn delete_elem(&mut self, _key: &[u8]) -> Result<()> { + Err(SystemError::EINVAL) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> Result { + if flags != 0 { + return Err(SystemError::EINVAL); + } + let mut total_used = 0; + for i in 0..self.max_entries { + let key = i.to_ne_bytes(); + let value = self.data.index(i); + total_used += 1; + let res = cb(ctx, &key, value); + // return value: 0 - continue, 1 - stop and return + if res != 0 { + break; + } + } + Ok(total_used) + } + + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { + if let Some(key) = key { + if key.len() != 4 { + return Err(SystemError::EINVAL); + } + let index = u32::from_ne_bytes(key.try_into().unwrap()); + if index == self.max_entries - 1 { + return Err(SystemError::ENOENT); + } + let next_index = index + 1; + next_key.copy_from_slice(&next_index.to_ne_bytes()); + } else { + next_key.copy_from_slice(&0u32.to_ne_bytes()); + } + Ok(()) + } + + fn freeze(&self) -> Result<()> { + info!("fake freeze done for ArrayMap"); + Ok(()) + } + fn first_value_ptr(&self) -> *const u8 { + self.data.data.as_ptr() + } +} + +pub struct PerCpuArrayMap { + data: Vec, + _phantom: core::marker::PhantomData, +} + +impl Debug for PerCpuArrayMap { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PerCpuArrayMap") + .field("data", &self.data) + .finish() + } +} + +impl TryFrom<&BpfMapMeta> for PerCpuArrayMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + let num_cpus = T::num_cpus(); + let mut data = Vec::with_capacity(num_cpus as usize); + for _ in 0..num_cpus { + let array_map = ArrayMap::try_from(attr)?; + data.push(array_map); + } + Ok(PerCpuArrayMap { + data, + _phantom: core::marker::PhantomData, + }) + } +} + +impl BpfMapCommonOps for PerCpuArrayMap { + fn lookup_elem(&self, key: &[u8]) -> Result> { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].lookup_elem(key) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].update_elem(key, value, flags) + } + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].delete_elem(key) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> Result { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].for_each_elem(cb, ctx, flags) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].get_next_key(key, next_key) + } + fn first_value_ptr(&self) -> *const u8 { + let cpu_id = T::cpu_id(); + self.data[cpu_id as usize].first_value_ptr() + } +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERF_EVENT_ARRAY/ +pub struct PerfEventArrayMap { + // The value is the file descriptor of the perf event. 
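+ // One 4-byte slot per CPU, indexed by CPU id.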
+ fds: ArrayMapData, + _phantom: core::marker::PhantomData, +} + +impl Debug for PerfEventArrayMap { + fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PerfEventArrayMap") + .field("fds", &self.fds) + .finish() + } +} + +impl TryFrom<&BpfMapMeta> for PerfEventArrayMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + let num_cpus = T::num_cpus(); + if attr.key_size != 4 || attr.value_size != 4 || attr.max_entries != num_cpus { + return Err(SystemError::EINVAL); + } + let fds = ArrayMapData::new(4, num_cpus); + Ok(PerfEventArrayMap { + fds, + _phantom: core::marker::PhantomData, + }) + } +} + +impl BpfMapCommonOps for PerfEventArrayMap { + fn lookup_elem(&self, key: &[u8]) -> Result> { + let cpu_id = u32::from_ne_bytes(key.try_into().unwrap()); + let value = self.fds.index(cpu_id); + Ok(Some(value)) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> Result<()> { + assert_eq!(value.len(), 4); + let cpu_id = u32::from_ne_bytes(key.try_into().unwrap()); + let old_value = self.fds.index_mut(cpu_id); + old_value.copy_from_slice(value); + Ok(()) + } + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { + let cpu_id = u32::from_ne_bytes(key.try_into().unwrap()); + self.fds.index_mut(cpu_id).copy_from_slice(&[0; 4]); + Ok(()) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], _flags: u64) -> Result { + let mut total_used = 0; + for i in 0..T::num_cpus() { + let key = i.to_ne_bytes(); + let value = self.fds.index(i); + total_used += 1; + let res = cb(ctx, &key, value); + if res != 0 { + break; + } + } + Ok(total_used) + } + fn first_value_ptr(&self) -> *const u8 { + self.fds.data.as_ptr() + } +} diff --git a/kernel/src/bpf/map/hash_map.rs b/kernel/src/bpf/map/hash_map.rs new file mode 100644 index 000000000..edca78afc --- /dev/null +++ b/kernel/src/bpf/map/hash_map.rs @@ -0,0 +1,85 @@ +use crate::bpf::map::util::round_up; +use crate::bpf::map::{BpfCallBackFn, BpfMapCommonOps, BpfMapMeta}; +use alloc::{collections::BTreeMap, vec::Vec}; +use system_error::SystemError; + +type BpfHashMapKey = Vec; +type BpfHashMapValue = Vec; + +#[derive(Debug)] +pub struct BpfHashMap { + max_entries: u32, + key_size: u32, + value_size: u32, + data: BTreeMap, +} + +impl TryFrom<&BpfMapMeta> for BpfHashMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + if attr.value_size == 0 || attr.max_entries == 0 { + return Err(SystemError::EINVAL); + } + let value_size = round_up(attr.value_size as usize, 8); + Ok(Self { + max_entries: attr.max_entries, + key_size: attr.key_size, + value_size: value_size as u32, + data: BTreeMap::new(), + }) + } +} + +impl BpfMapCommonOps for BpfHashMap { + fn lookup_elem(&self, key: &[u8]) -> super::Result> { + let value = self.data.get(key).map(|v| v.as_slice()); + Ok(value) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> super::Result<()> { + self.data.insert(key.to_vec(), value.to_vec()); + Ok(()) + } + fn delete_elem(&mut self, key: &[u8]) -> super::Result<()> { + self.data.remove(key); + Ok(()) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> super::Result { + if flags != 0 { + return Err(SystemError::EINVAL); + } + let mut total_used = 0; + for (key, value) in self.data.iter() { + let res = cb(ctx, key, value); + // return value: 0 - continue, 1 - stop and return + if res != 0 { + break; + } + total_used += 1; + } + Ok(total_used) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> 
crate::bpf::Result<()> { + let mut iter = self.data.iter(); + if let Some(key) = key { + for (k, _) in iter.by_ref() { + if k.as_slice() == key { + break; + } + } + } + let res = iter.next(); + match res { + Some((k, _)) => { + next_key.copy_from_slice(k.as_slice()); + Ok(()) + } + None => Err(SystemError::ENOENT), + } + } + fn freeze(&self) -> super::Result<()> { + Ok(()) + } + fn first_value_ptr(&self) -> *const u8 { + panic!("first_value_ptr for Hashmap not implemented"); + } +} diff --git a/kernel/src/bpf/map/mod.rs b/kernel/src/bpf/map/mod.rs new file mode 100644 index 000000000..3d106ef70 --- /dev/null +++ b/kernel/src/bpf/map/mod.rs @@ -0,0 +1,313 @@ +mod array_map; +mod hash_map; +mod util; + +use super::Result; +use crate::bpf::map::array_map::{ArrayMap, PerCpuArrayMap, PerfEventArrayMap}; +use crate::bpf::map::util::{BpfMapGetNextKeyArg, BpfMapMeta, BpfMapUpdateArg}; +use crate::filesystem::vfs::file::{File, FileMode}; +use crate::filesystem::vfs::syscall::ModeType; +use crate::filesystem::vfs::{FilePrivateData, FileSystem, FileType, IndexNode, Metadata}; +use crate::include::bindings::linux_bpf::{bpf_attr, bpf_map_type}; +use crate::libs::casting::DowncastArc; +use crate::libs::spinlock::{SpinLock, SpinLockGuard}; +use crate::process::ProcessManager; +use crate::smp::core::smp_get_processor_id; +use crate::smp::cpu::smp_cpu_manager; +use crate::syscall::user_access::{UserBufferReader, UserBufferWriter}; +use alloc::boxed::Box; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::any::Any; +use core::fmt::Debug; +use intertrait::CastFromSync; +use log::{error, info}; +use system_error::SystemError; + +#[derive(Debug)] +pub struct BpfMap { + inner_map: SpinLock>, + meta: BpfMapMeta, +} + +type BpfCallBackFn = fn(key: &[u8], value: &[u8], ctx: &[u8]) -> i32; + +pub trait BpfMapCommonOps: Send + Sync + Debug + CastFromSync { + /// Lookup an element in the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_elem/ + fn lookup_elem(&self, _key: &[u8]) -> Result> { + panic!("lookup_elem not implemented") + } + /// Update an element in the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_update_elem/ + fn update_elem(&mut self, _key: &[u8], _value: &[u8], _flags: u64) -> Result<()> { + panic!("update_elem not implemented") + } + /// Delete an element from the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_delete_elem/ + fn delete_elem(&mut self, _key: &[u8]) -> Result<()> { + panic!("delete_elem not implemented") + } + /// For each element in map, call callback_fn function with map, + /// callback_ctx and other map-specific parameters. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_for_each_map_elem/ + fn for_each_elem(&mut self, _cb: BpfCallBackFn, _ctx: &[u8], _flags: u64) -> Result { + panic!("for_each_elem not implemented") + } + + /// Get the next key in the map. If key is None, get the first key. + /// + /// Called from syscall + fn get_next_key(&self, _key: Option<&[u8]>, _next_key: &mut [u8]) -> Result<()> { + panic!("get_next_key not implemented") + } + + /// Freeze the map. + /// + /// It's useful for .rodata maps. + fn freeze(&self) -> Result<()> { + panic!("freeze not implemented") + } + + /// Get the first value pointer. 
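+ /// Used by the program verifier when relocating `BPF_PSEUDO_MAP_VALUE` loads.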
+ fn first_value_ptr(&self) -> *const u8 { + panic!("value_ptr not implemented") + } +} +impl DowncastArc for dyn BpfMapCommonOps { + fn as_any_arc(self: Arc) -> Arc { + self + } +} + +pub struct PerCpuInfoImpl; + +impl PerCpuInfo for PerCpuInfoImpl { + fn cpu_id() -> u32 { + // let cpu = smp_get_processor_id(); + // log::info!("cpu_id: {:?}", cpu.data()); + // cpu.data() + 0 + } + fn num_cpus() -> u32 { + // let cpus = smp_cpu_manager(); + // log::info!("num_cpus: {:?}", cpus.present_cpus_count()); + // cpus.present_cpus_count() + 1 + } +} + +pub trait PerCpuInfo: Send + Sync + 'static { + /// Get the CPU ID of the current CPU. + fn cpu_id() -> u32; + /// Get the number of CPUs. + fn num_cpus() -> u32; +} + +impl BpfMap { + pub fn new(map: Box, meta: BpfMapMeta) -> Self { + assert_ne!(meta.key_size, 0); + BpfMap { + inner_map: SpinLock::new(map), + meta, + } + } + + pub fn inner_map(&self) -> &SpinLock> { + &self.inner_map + } + + pub fn key_size(&self) -> usize { + self.meta.key_size as usize + } + + pub fn value_size(&self) -> usize { + self.meta.value_size as usize + } +} + +impl IndexNode for BpfMap { + fn open(&self, _data: SpinLockGuard, _mode: &FileMode) -> Result<()> { + Ok(()) + } + fn close(&self, _data: SpinLockGuard) -> Result<()> { + Ok(()) + } + fn read_at( + &self, + _offset: usize, + _len: usize, + _buf: &mut [u8], + _data: SpinLockGuard, + ) -> Result { + Err(SystemError::ENOSYS) + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + _buf: &[u8], + _data: SpinLockGuard, + ) -> Result { + Err(SystemError::ENOSYS) + } + + fn metadata(&self) -> Result { + let meta = Metadata { + mode: ModeType::from_bits_truncate(0o755), + file_type: FileType::File, + ..Default::default() + }; + Ok(meta) + } + + fn resize(&self, _len: usize) -> Result<()> { + Ok(()) + } + + fn fs(&self) -> Arc { + panic!("BpfMap does not have a filesystem") + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + + fn list(&self) -> Result> { + Err(SystemError::ENOSYS) + } +} + +pub fn bpf_map_create(attr: &bpf_attr) -> Result { + let map_meta = BpfMapMeta::try_from(attr)?; + info!("The map attr is {:#?}", map_meta); + let map: Box = match map_meta.map_type { + bpf_map_type::BPF_MAP_TYPE_ARRAY => { + let array_map = ArrayMap::try_from(&map_meta)?; + Box::new(array_map) + } + bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY => { + let per_cpu_array_map = PerCpuArrayMap::::try_from(&map_meta)?; + Box::new(per_cpu_array_map) + } + bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY => { + let perf_event_array_map = PerfEventArrayMap::::try_from(&map_meta)?; + Box::new(perf_event_array_map) + } + + bpf_map_type::BPF_MAP_TYPE_CPUMAP + | bpf_map_type::BPF_MAP_TYPE_DEVMAP + | bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH => { + error!("bpf map type {:?} not implemented", map_meta.map_type); + Err(SystemError::EINVAL)? 
+ } + bpf_map_type::BPF_MAP_TYPE_HASH => { + let hash_map = hash_map::BpfHashMap::try_from(&map_meta)?; + Box::new(hash_map) + } + _ => { + unimplemented!("bpf map type {:?} not implemented", map_meta.map_type) + } + }; + let bpf_map = BpfMap::new(map, map_meta); + let fd_table = ProcessManager::current_pcb().fd_table(); + let file = File::new(Arc::new(bpf_map), FileMode::O_RDWR)?; + let fd = fd_table.write().alloc_fd(file, None).map(|x| x as usize)?; + info!("create map with fd: [{}]", fd); + Ok(fd) +} + +pub fn bpf_map_update_elem(attr: &bpf_attr) -> Result { + let arg = BpfMapUpdateArg::from(attr); + info!(": {:#x?}", arg); + let map = get_map_file(arg.map_fd as i32)?; + let meta = &map.meta; + let key_size = meta.key_size as usize; + let value_size = meta.value_size as usize; + + let key_buf = UserBufferReader::new(arg.key as *const u8, key_size, true)?; + let value_buf = UserBufferReader::new(arg.value as *const u8, value_size, true)?; + + let key = key_buf.read_from_user(0)?; + let value = value_buf.read_from_user(0)?; + map.inner_map.lock().update_elem(key, value, arg.flags)?; + info!("bpf_map_update_elem ok"); + Ok(0) +} + +pub fn bpf_map_freeze(attr: &bpf_attr) -> Result { + let arg = BpfMapUpdateArg::from(attr); + let map_fd = arg.map_fd; + info!(": map_fd: {:}", map_fd); + let map = get_map_file(map_fd as i32)?; + map.inner_map.lock().freeze()?; + Ok(0) +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_LOOKUP_ELEM/ +pub fn bpf_lookup_elem(attr: &bpf_attr) -> Result { + let arg = BpfMapUpdateArg::from(attr); + // info!(": {:#x?}", arg); + let map = get_map_file(arg.map_fd as _)?; + let meta = &map.meta; + let key_size = meta.key_size as usize; + let value_size = meta.value_size as usize; + + let key_buf = UserBufferReader::new(arg.key as *const u8, key_size, true)?; + let mut value_buf = UserBufferWriter::new(arg.value as *mut u8, value_size, true)?; + + let key = key_buf.read_from_user(0)?; + + let inner = map.inner_map.lock(); + let r_value = inner.lookup_elem(key).unwrap(); + if let Some(r_value) = r_value { + value_buf.copy_to_user(r_value, 0)?; + Ok(0) + } else { + Err(SystemError::ENOENT) + } +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_GET_NEXT_KEY/ +pub fn bpf_map_get_next_key(attr: &bpf_attr) -> Result { + let arg = BpfMapGetNextKeyArg::from(attr); + // info!(": {:#x?}", arg); + let map = get_map_file(arg.map_fd as i32)?; + let meta = &map.meta; + let key_size = meta.key_size as usize; + + let key = if let Some(key_ptr) = arg.key { + let key_buf = UserBufferReader::new(key_ptr as *const u8, key_size, true)?; + let key = key_buf.read_from_user(0)?.to_vec(); + Some(key) + } else { + None + }; + let key = key.as_deref(); + let mut next_key_buf = UserBufferWriter::new(arg.next_key as *mut u8, key_size, true)?; + let inner = map.inner_map.lock(); + let mut next_key = vec![0u8; key_size]; + inner.get_next_key(key, &mut next_key)?; + // info!("next_key: {:?}", next_key); + next_key_buf.copy_to_user(&next_key, 0)?; + Ok(0) +} + +fn get_map_file(fd: i32) -> Result> { + let fd_table = ProcessManager::current_pcb().fd_table(); + let map = fd_table + .read() + .get_file_by_fd(fd) + .ok_or(SystemError::EBADF)?; + let map = map + .inode() + .downcast_arc::() + .ok_or(SystemError::EINVAL)?; + Ok(map) +} diff --git a/kernel/src/bpf/map/util.rs b/kernel/src/bpf/map/util.rs new file mode 100644 index 000000000..3812dc59b --- /dev/null +++ b/kernel/src/bpf/map/util.rs @@ -0,0 +1,86 @@ +use crate::include::bindings::linux_bpf::{bpf_attr, 
bpf_map_type}; +use alloc::string::{String, ToString}; +use core::ffi::CStr; +use num_traits::FromPrimitive; +use system_error::SystemError; + +#[derive(Debug, Clone)] +pub struct BpfMapMeta { + pub map_type: bpf_map_type, + pub key_size: u32, + pub value_size: u32, + pub max_entries: u32, + pub map_flags: u32, + pub map_name: String, +} + +impl TryFrom<&bpf_attr> for BpfMapMeta { + type Error = SystemError; + fn try_from(value: &bpf_attr) -> Result { + let u = unsafe { &value.__bindgen_anon_1 }; + let map_name_slice = unsafe { + core::slice::from_raw_parts(u.map_name.as_ptr() as *const u8, u.map_name.len()) + }; + let map_name = CStr::from_bytes_until_nul(map_name_slice) + .map_err(|_| SystemError::EINVAL)? + .to_str() + .map_err(|_| SystemError::EINVAL)? + .to_string(); + let map_type = bpf_map_type::from_u32(u.map_type).ok_or(SystemError::EINVAL)?; + Ok(BpfMapMeta { + map_type, + key_size: u.key_size, + value_size: u.value_size, + max_entries: u.max_entries, + map_flags: u.map_flags, + map_name, + }) + } +} + +#[derive(Debug)] +pub struct BpfMapUpdateArg { + pub map_fd: u32, + pub key: u64, + pub value: u64, + pub flags: u64, +} + +impl From<&bpf_attr> for BpfMapUpdateArg { + fn from(value: &bpf_attr) -> Self { + unsafe { + let u = &value.__bindgen_anon_2; + BpfMapUpdateArg { + map_fd: u.map_fd, + key: u.key, + value: u.__bindgen_anon_1.value, + flags: u.flags, + } + } + } +} +#[derive(Debug)] +pub struct BpfMapGetNextKeyArg { + pub map_fd: u32, + pub key: Option, + pub next_key: u64, +} + +impl From<&bpf_attr> for BpfMapGetNextKeyArg { + fn from(value: &bpf_attr) -> Self { + unsafe { + let u = &value.__bindgen_anon_2; + BpfMapGetNextKeyArg { + map_fd: u.map_fd, + key: if u.key != 0 { Some(u.key) } else { None }, + next_key: u.__bindgen_anon_1.next_key, + } + } + } +} + +#[inline] +/// Round up `x` to the nearest multiple of `align`. 
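+/// `align` must be a power of two; for example, `round_up(12, 8) == 16` and `round_up(16, 8) == 16`.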
+pub fn round_up(x: usize, align: usize) -> usize { + (x + align - 1) & !(align - 1) +} diff --git a/kernel/src/bpf/mod.rs b/kernel/src/bpf/mod.rs new file mode 100644 index 000000000..3eb9e347f --- /dev/null +++ b/kernel/src/bpf/mod.rs @@ -0,0 +1,45 @@ +#![allow(unused)] +pub mod helper; +pub mod map; +pub mod prog; +use crate::include::bindings::linux_bpf::{bpf_attr, bpf_cmd}; +use crate::syscall::user_access::UserBufferReader; +use crate::syscall::Syscall; +use log::error; +use num_traits::FromPrimitive; +use system_error::SystemError; + +type Result = core::result::Result; + +impl Syscall { + pub fn sys_bpf(cmd: u32, attr: *mut u8, size: u32) -> Result { + let buf = UserBufferReader::new(attr, size as usize, true)?; + let attr = buf.read_one_from_user::(0)?; + let cmd = bpf_cmd::from_u32(cmd).ok_or(SystemError::EINVAL)?; + bpf(cmd, attr) + } +} + +pub fn bpf(cmd: bpf_cmd, attr: &bpf_attr) -> Result { + let res = match cmd { + bpf_cmd::BPF_MAP_CREATE => map::bpf_map_create(attr), + bpf_cmd::BPF_MAP_UPDATE_ELEM => map::bpf_map_update_elem(attr), + bpf_cmd::BPF_MAP_FREEZE => map::bpf_map_freeze(attr), + bpf_cmd::BPF_PROG_LOAD => prog::bpf_prog_load(attr), + bpf_cmd::BPF_MAP_LOOKUP_ELEM => map::bpf_lookup_elem(attr), + bpf_cmd::BPF_MAP_GET_NEXT_KEY => map::bpf_map_get_next_key(attr), + bpf_cmd::BPF_BTF_LOAD => { + error!("bpf cmd {:?} not implemented", cmd); + return Err(SystemError::ENOSYS); + } + ty => { + unimplemented!("bpf cmd {:?} not implemented", ty) + } + }; + res +} + +/// Initialize the BPF system +pub fn init_bpf_system() { + helper::init_helper_functions(); +} diff --git a/kernel/src/bpf/prog/mod.rs b/kernel/src/bpf/prog/mod.rs new file mode 100644 index 000000000..93f2e595d --- /dev/null +++ b/kernel/src/bpf/prog/mod.rs @@ -0,0 +1,104 @@ +mod util; +mod verifier; + +use super::Result; +use crate::bpf::prog::util::{BpfProgMeta, BpfProgVerifierInfo}; +use crate::bpf::prog::verifier::BpfProgVerifier; +use crate::filesystem::vfs::file::{File, FileMode}; +use crate::filesystem::vfs::syscall::ModeType; +use crate::filesystem::vfs::{FilePrivateData, FileSystem, FileType, IndexNode, Metadata}; +use crate::include::bindings::linux_bpf::bpf_attr; +use crate::libs::spinlock::SpinLockGuard; +use crate::process::ProcessManager; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::any::Any; +use system_error::SystemError; + +#[derive(Debug)] +pub struct BpfProg { + meta: BpfProgMeta, +} + +impl BpfProg { + pub fn new(meta: BpfProgMeta) -> Self { + Self { meta } + } + + pub fn insns(&self) -> &[u8] { + &self.meta.insns + } + + pub fn insns_mut(&mut self) -> &mut [u8] { + &mut self.meta.insns + } +} + +impl IndexNode for BpfProg { + fn open(&self, _data: SpinLockGuard, _mode: &FileMode) -> Result<()> { + Ok(()) + } + fn close(&self, _data: SpinLockGuard) -> Result<()> { + Ok(()) + } + fn read_at( + &self, + _offset: usize, + _len: usize, + _buf: &mut [u8], + _data: SpinLockGuard, + ) -> Result { + Err(SystemError::ENOSYS) + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + _buf: &[u8], + _data: SpinLockGuard, + ) -> Result { + Err(SystemError::ENOSYS) + } + + fn metadata(&self) -> Result { + let meta = Metadata { + mode: ModeType::from_bits_truncate(0o755), + file_type: FileType::File, + ..Default::default() + }; + Ok(meta) + } + + fn resize(&self, _len: usize) -> Result<()> { + Ok(()) + } + + fn fs(&self) -> Arc { + panic!("BpfProg does not have a filesystem") + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + + fn list(&self) -> Result> { + 
Err(SystemError::ENOSYS) + } +} + +/// Load a BPF program into the kernel. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_PROG_LOAD/ +pub fn bpf_prog_load(attr: &bpf_attr) -> Result { + let args = BpfProgMeta::try_from(attr)?; + // info!("bpf_prog_load: {:#?}", args); + let log_info = BpfProgVerifierInfo::from(attr); + let prog = BpfProg::new(args); + let fd_table = ProcessManager::current_pcb().fd_table(); + let prog = BpfProgVerifier::new(prog, log_info.log_level, &mut []).verify(&fd_table)?; + let file = File::new(Arc::new(prog), FileMode::O_RDWR)?; + let fd = fd_table.write().alloc_fd(file, None).map(|x| x as usize)?; + Ok(fd) +} diff --git a/kernel/src/bpf/prog/util.rs b/kernel/src/bpf/prog/util.rs new file mode 100644 index 000000000..540211218 --- /dev/null +++ b/kernel/src/bpf/prog/util.rs @@ -0,0 +1,112 @@ +use crate::include::bindings::linux_bpf::{bpf_attach_type, bpf_attr, bpf_prog_type}; +use crate::syscall::user_access::{check_and_clone_cstr, UserBufferReader}; +use alloc::string::{String, ToString}; +use alloc::vec::Vec; +use core::ffi::CStr; +use core::fmt::Debug; +use num_traits::FromPrimitive; +use system_error::SystemError; + +bitflags::bitflags! { + + pub struct VerifierLogLevel: u32 { + /// Sets no verifier logging. + const DISABLE = 0; + /// Enables debug verifier logging. + const DEBUG = 1; + /// Enables verbose verifier logging. + const VERBOSE = 2 | Self::DEBUG.bits(); + /// Enables verifier stats. + const STATS = 4; + } +} + +#[derive(Debug)] +pub struct BpfProgVerifierInfo { + /// This attribute specifies the level/detail of the log output. The valid values are the `VerifierLogLevel` flags. + pub log_level: VerifierLogLevel, + /// This attribute indicates the size in bytes of the memory region + /// pointed to by `log_buf` which can safely be written to by the kernel. + pub log_buf_size: u32, + /// This attribute can be set to a pointer to a memory region + /// allocated/reserved by the loader process where the verifier log will + /// be written to. + /// The detail of the log is set by `log_level`. The verifier log + /// is often the only indication, in addition to the error code, of + /// why the syscall command failed to load the program. + /// + /// The log is also written to on success. If the kernel runs out of + /// space in the buffer while loading, the loading process will fail + /// and the command will return with an error code of -ENOSPC. So it + /// is important to correctly size the buffer when enabling logging.
+ pub log_buf_ptr: usize, +} + +impl From<&bpf_attr> for BpfProgVerifierInfo { + fn from(attr: &bpf_attr) -> Self { + unsafe { + let u = &attr.__bindgen_anon_3; + Self { + log_level: VerifierLogLevel::from_bits_truncate(u.log_level), + log_buf_size: u.log_size, + log_buf_ptr: u.log_buf as usize, + } + } + } +} + +pub struct BpfProgMeta { + pub prog_flags: u32, + pub prog_type: bpf_prog_type, + pub expected_attach_type: bpf_attach_type, + pub insns: Vec, + pub license: String, + pub kern_version: u32, + pub name: String, +} + +impl Debug for BpfProgMeta { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("BpfProgMeta") + .field("prog_flags", &self.prog_flags) + .field("prog_type", &self.prog_type) + .field("expected_attach_type", &self.expected_attach_type) + .field("insns_len", &(self.insns.len() / 8)) + .field("license", &self.license) + .field("kern_version", &self.kern_version) + .field("name", &self.name) + .finish() + } +} + +impl TryFrom<&bpf_attr> for BpfProgMeta { + type Error = SystemError; + fn try_from(attr: &bpf_attr) -> Result { + let u = unsafe { &attr.__bindgen_anon_3 }; + let prog_type = bpf_prog_type::from_u32(u.prog_type).ok_or(SystemError::EINVAL)?; + let expected_attach_type = + bpf_attach_type::from_u32(u.expected_attach_type).ok_or(SystemError::EINVAL)?; + unsafe { + let insns_buf = + UserBufferReader::new(u.insns as *mut u8, u.insn_cnt as usize * 8, true)?; + let insns = insns_buf.read_from_user::(0)?.to_vec(); + let name_slice = + core::slice::from_raw_parts(u.prog_name.as_ptr() as *const u8, u.prog_name.len()); + let prog_name = CStr::from_bytes_until_nul(name_slice) + .map_err(|_| SystemError::EINVAL)? + .to_str() + .map_err(|_| SystemError::EINVAL)? + .to_string(); + let license = check_and_clone_cstr(u.license as *const u8, None)?; + Ok(Self { + prog_flags: u.prog_flags, + prog_type, + expected_attach_type, + insns, + license: license.into_string().map_err(|_| SystemError::EINVAL)?, + kern_version: u.kern_version, + name: prog_name, + }) + } + } +} diff --git a/kernel/src/bpf/prog/verifier.rs b/kernel/src/bpf/prog/verifier.rs new file mode 100644 index 000000000..e00a2960a --- /dev/null +++ b/kernel/src/bpf/prog/verifier.rs @@ -0,0 +1,125 @@ +use super::super::Result; +use crate::bpf::map::BpfMap; +use crate::bpf::prog::util::VerifierLogLevel; +use crate::bpf::prog::BpfProg; +use crate::filesystem::vfs::file::FileDescriptorVec; +use crate::include::bindings::linux_bpf::*; +use crate::libs::casting::DowncastArc; +use crate::libs::rwlock::RwLock; +use alloc::{sync::Arc, vec::Vec}; +use log::{error, info}; +use rbpf::ebpf; +use rbpf::ebpf::to_insn_vec; +use system_error::SystemError; + +/// The BPF program verifier. +/// +/// See https://docs.kernel.org/bpf/verifier.html +#[derive(Debug)] +pub struct BpfProgVerifier<'a> { + prog: BpfProg, + log_level: VerifierLogLevel, + log_buf: &'a mut [u8], +} + +impl<'a> BpfProgVerifier<'a> { + pub fn new(prog: BpfProg, log_level: VerifierLogLevel, log_buf: &'a mut [u8]) -> Self { + Self { + prog, + log_level, + log_buf, + } + } + /// Relocate the program. + /// + /// This function will relocate the program, and update the program's instructions. 
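+ /// Concretely, every `LD_DW_IMM` instruction whose source register is `BPF_PSEUDO_MAP_FD` or `BPF_PSEUDO_MAP_VALUE` has its 64-bit immediate (split across the instruction pair) patched with the runtime address of the referenced map, or of the map's first value.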
+    fn relocation(&mut self, fd_table: &Arc<RwLock<FileDescriptorVec>>) -> Result<()> {
+        let instructions = self.prog.insns_mut();
+        let mut fmt_insn = to_insn_vec(instructions);
+        let mut index = 0;
+        loop {
+            if index >= fmt_insn.len() {
+                break;
+            }
+            let mut insn = fmt_insn[index].clone();
+            if insn.opc == ebpf::LD_DW_IMM {
+                // relocate the instruction
+                let mut next_insn = fmt_insn[index + 1].clone();
+                // the imm holds the map fd; the user-space loader has already patched it in
+                let map_fd = insn.imm as usize;
+                let src_reg = insn.src;
+                // See https://www.kernel.org/doc/html/latest/bpf/standardization/instruction-set.html#id23
+                let ptr = match src_reg as u32 {
+                    BPF_PSEUDO_MAP_VALUE => {
+                        // dst = map_val(map_by_fd(imm)) + next_imm
+                        // map_val(map) gets the address of the first value in a given map
+                        let file = fd_table
+                            .read()
+                            .get_file_by_fd(map_fd as i32)
+                            .ok_or(SystemError::EBADF)?;
+                        let bpf_map = file
+                            .inode()
+                            .downcast_arc::<BpfMap>()
+                            .ok_or(SystemError::EINVAL)?;
+                        let first_value_ptr = bpf_map.inner_map().lock().first_value_ptr() as usize;
+                        let offset = next_insn.imm as usize;
+                        info!(
+                            "Relocate for BPF_PSEUDO_MAP_VALUE, instruction index: {}, map_fd: {}",
+                            index, map_fd
+                        );
+                        Some(first_value_ptr + offset)
+                    }
+                    BPF_PSEUDO_MAP_FD => {
+                        // dst = map_by_fd(imm)
+                        // map_by_fd(imm) converts a 32-bit file descriptor into the address of a map
+                        let bpf_map = fd_table
+                            .read()
+                            .get_file_by_fd(map_fd as i32)
+                            .ok_or(SystemError::EBADF)?
+                            .inode()
+                            .downcast_arc::<BpfMap>()
+                            .ok_or(SystemError::EINVAL)?;
+                        // TODO: this leaked reference must be released when the program is unloaded
+                        let map_ptr = Arc::into_raw(bpf_map) as usize;
+                        info!(
+                            "Relocate for BPF_PSEUDO_MAP_FD, instruction index: {}, map_fd: {}, ptr: {:#x}",
+                            index, map_fd, map_ptr
+                        );
+                        Some(map_ptr)
+                    }
+                    ty => {
+                        error!(
+                            "relocation for ty: {} not implemented, instruction index: {}",
+                            ty, index
+                        );
+                        None
+                    }
+                };
+                if let Some(ptr) = ptr {
+                    // The current insn stores the low 32 bits of the pointer,
+                    // and the next insn stores the high 32 bits
+                    insn.imm = ptr as i32;
+                    next_insn.imm = (ptr >> 32) as i32;
+                    fmt_insn[index] = insn;
+                    fmt_insn[index + 1] = next_insn;
+                    index += 2;
+                } else {
+                    index += 1;
+                }
+            } else {
+                index += 1;
+            }
+        }
+        let fmt_insn = fmt_insn
+            .iter()
+            .flat_map(|ins| ins.to_vec())
+            .collect::<Vec<u8>>();
+        instructions.copy_from_slice(&fmt_insn);
+        Ok(())
+    }
+
+    pub fn verify(mut self, fd_table: &Arc<RwLock<FileDescriptorVec>>) -> Result<BpfProg> {
+        self.relocation(fd_table)?;
+        Ok(self.prog)
+    }
+}
diff --git a/kernel/src/debug/kprobe/args.rs b/kernel/src/debug/kprobe/args.rs
index dfce410d0..33ca45491 100644
--- a/kernel/src/debug/kprobe/args.rs
+++ b/kernel/src/debug/kprobe/args.rs
@@ -1,24 +1,27 @@
-use alloc::string::ToString;
-use kprobe::{KprobeBuilder, ProbeArgs};
+use alloc::boxed::Box;
+use alloc::string::String;
+use kprobe::{CallBackFunc, KprobeBuilder, ProbeArgs};
 use log::warn;
 use system_error::SystemError;
 
-pub struct KprobeInfo<'a> {
+pub struct KprobeInfo {
     pub pre_handler: fn(&dyn ProbeArgs),
     pub post_handler: fn(&dyn ProbeArgs),
     pub fault_handler: Option<fn(&dyn ProbeArgs)>,
-    pub symbol: Option<&'a str>,
+    pub event_callback: Option<Box<dyn CallBackFunc>>,
+    pub symbol: Option<String>,
     pub addr: Option<usize>,
     pub offset: usize,
+    pub enable: bool,
 }
 
 extern "C" {
     fn addr_from_symbol(symbol: *const u8) -> usize;
 }
 
-impl<'a> TryFrom<KprobeInfo<'a>> for KprobeBuilder {
+impl TryFrom<KprobeInfo> for KprobeBuilder {
     type Error = SystemError;
-    fn try_from(kprobe_info: KprobeInfo<'a>) -> Result<Self, Self::Error> {
+    fn try_from(kprobe_info: KprobeInfo) -> Result<Self, Self::Error> {
         // 检查参数: symbol和addr必须有一个但不能同时有
         if kprobe_info.symbol.is_none() &&
kprobe_info.addr.is_none() {
             return Err(SystemError::EINVAL);
@@ -26,8 +29,8 @@ impl<'a> TryFrom<KprobeInfo<'a>> for KprobeBuilder {
         if kprobe_info.symbol.is_some() && kprobe_info.addr.is_some() {
             return Err(SystemError::EINVAL);
         }
-        let func_addr = if let Some(symbol) = kprobe_info.symbol {
-            let mut symbol_sting = symbol.to_string();
+        let func_addr = if let Some(symbol) = kprobe_info.symbol.clone() {
+            let mut symbol_sting = symbol;
             if !symbol_sting.ends_with("\0") {
                 symbol_sting.push('\0');
             }
@@ -45,14 +48,18 @@ impl<'a> TryFrom<KprobeInfo<'a>> for KprobeBuilder {
             kprobe_info.addr.unwrap()
         };
         let mut builder = KprobeBuilder::new(
-            kprobe_info.symbol.map(|s| s.to_string()),
+            kprobe_info.symbol,
             func_addr,
             kprobe_info.offset,
             kprobe_info.pre_handler,
             kprobe_info.post_handler,
+            kprobe_info.enable,
        );
-        if kprobe_info.fault_handler.is_some() {
-            builder = builder.with_fault_handler(kprobe_info.fault_handler.unwrap());
+        if let Some(fault_handler) = kprobe_info.fault_handler {
+            builder = builder.with_fault_handler(fault_handler);
+        }
+        if let Some(event_callback) = kprobe_info.event_callback {
+            builder = builder.with_event_callback(event_callback);
         }
         Ok(builder)
     }
diff --git a/kernel/src/debug/kprobe/mod.rs b/kernel/src/debug/kprobe/mod.rs
index e8805d1a4..0ee803b29 100644
--- a/kernel/src/debug/kprobe/mod.rs
+++ b/kernel/src/debug/kprobe/mod.rs
@@ -1,17 +1,138 @@
 use crate::debug::kprobe::args::KprobeInfo;
+use crate::libs::rwlock::RwLock;
 use crate::libs::spinlock::SpinLock;
 use alloc::collections::BTreeMap;
 use alloc::sync::Arc;
-use kprobe::{Kprobe, KprobeBuilder, KprobeManager, KprobeOps, KprobePoint};
+use alloc::vec::Vec;
+use kprobe::{Kprobe, KprobeBuilder, KprobeOps, KprobePoint};
 use system_error::SystemError;
 
-mod args;
+pub mod args;
 mod test;
 
+pub type LockKprobe = Arc<RwLock<Kprobe>>;
 pub static KPROBE_MANAGER: SpinLock<KprobeManager> = SpinLock::new(KprobeManager::new());
 static KPROBE_POINT_LIST: SpinLock<BTreeMap<usize, Arc<KprobePoint>>> =
     SpinLock::new(BTreeMap::new());
 
+/// Manages all kprobe probe points
+#[derive(Debug, Default)]
+pub struct KprobeManager {
+    break_list: BTreeMap<usize, Vec<LockKprobe>>,
+    debug_list: BTreeMap<usize, Vec<LockKprobe>>,
+}
+
+impl KprobeManager {
+    pub const fn new() -> Self {
+        KprobeManager {
+            break_list: BTreeMap::new(),
+            debug_list: BTreeMap::new(),
+        }
+    }
+    /// # Insert a kprobe
+    ///
+    /// ## Parameters
+    /// - `kprobe`: the kprobe instance
+    pub fn insert_kprobe(&mut self, kprobe: LockKprobe) {
+        let probe_point = kprobe.read().probe_point().clone();
+        self.insert_break_point(probe_point.break_address(), kprobe.clone());
+        self.insert_debug_point(probe_point.debug_address(), kprobe);
+    }
+
+    /// # Insert a kprobe into break_list
+    ///
+    /// ## Parameters
+    /// - `address`: the kprobe address, as returned by `KprobePoint::break_address()` or `KprobeBuilder::probe_addr()`
+    /// - `kprobe`: the kprobe instance
+    fn insert_break_point(&mut self, address: usize, kprobe: LockKprobe) {
+        let list = self.break_list.entry(address).or_default();
+        list.push(kprobe);
+    }
+
+    /// # Insert a kprobe into debug_list
+    ///
+    /// ## Parameters
+    /// - `address`: the kprobe single-step address, as returned by `KprobePoint::debug_address()`
+    /// - `kprobe`: the kprobe instance
+    fn insert_debug_point(&mut self, address: usize, kprobe: LockKprobe) {
+        let list = self.debug_list.entry(address).or_default();
+        list.push(kprobe);
+    }
+
+    pub fn get_break_list(&self, address: usize) -> Option<&Vec<LockKprobe>> {
+        self.break_list.get(&address)
+    }
+
+    pub fn get_debug_list(&self, address: usize) -> Option<&Vec<LockKprobe>> {
+        self.debug_list.get(&address)
+    }
+
+    /// # Return the number of kprobes registered at an address
+    ///
+    /// ## Parameters
+    /// - `address`: the kprobe address, as returned by `KprobePoint::break_address()` or `KprobeBuilder::probe_addr()`
+    pub fn kprobe_num(&self, address: usize) ->
usize { + self.break_list_len(address) + } + + #[inline] + fn break_list_len(&self, address: usize) -> usize { + self.break_list + .get(&address) + .map(|list| list.len()) + .unwrap_or(0) + } + #[inline] + fn debug_list_len(&self, address: usize) -> usize { + self.debug_list + .get(&address) + .map(|list| list.len()) + .unwrap_or(0) + } + + /// # 移除一个kprobe + /// + /// ## 参数 + /// - `kprobe`: kprobe的实例 + pub fn remove_kprobe(&mut self, kprobe: &LockKprobe) { + let probe_point = kprobe.read().probe_point().clone(); + self.remove_one_break(probe_point.break_address(), kprobe); + self.remove_one_debug(probe_point.debug_address(), kprobe); + } + + /// # 从break_list中移除一个kprobe + /// + /// 如果没有其他kprobe注册在这个地址上,则删除列表 + /// + /// ## 参数 + /// - `address`: kprobe的地址, 由`KprobePoint::break_address()`或者`KprobeBuilder::probe_addr()`返回 + /// - `kprobe`: kprobe的实例 + fn remove_one_break(&mut self, address: usize, kprobe: &LockKprobe) { + if let Some(list) = self.break_list.get_mut(&address) { + list.retain(|x| !Arc::ptr_eq(x, kprobe)); + } + if self.break_list_len(address) == 0 { + self.break_list.remove(&address); + } + } + + /// # 从debug_list中移除一个kprobe + /// + /// 如果没有其他kprobe注册在这个地址上,则删除列表 + /// + /// ## 参数 + /// - `address`: kprobe的单步执行地址,由`KprobePoint::debug_address()`返回 + /// - `kprobe`: kprobe的实例 + fn remove_one_debug(&mut self, address: usize, kprobe: &LockKprobe) { + if let Some(list) = self.debug_list.get_mut(&address) { + list.retain(|x| !Arc::ptr_eq(x, kprobe)); + } + if self.debug_list_len(address) == 0 { + self.debug_list.remove(&address); + } + } +} + pub fn kprobe_init() {} #[cfg(feature = "kprobe_test")] @@ -25,7 +146,7 @@ pub fn kprobe_test() { /// /// ## 参数 /// - `kprobe_info`: kprobe的信息 -pub fn register_kprobe(kprobe_info: KprobeInfo) -> Result, SystemError> { +pub fn register_kprobe(kprobe_info: KprobeInfo) -> Result { let kprobe_builder = KprobeBuilder::try_from(kprobe_info)?; let address = kprobe_builder.probe_addr(); let existed_point = KPROBE_POINT_LIST.lock().get(&address).map(Clone::clone); @@ -42,7 +163,7 @@ pub fn register_kprobe(kprobe_info: KprobeInfo) -> Result, SystemErr kprobe } }; - let kprobe = Arc::new(kprobe); + let kprobe = Arc::new(RwLock::new(kprobe)); KPROBE_MANAGER.lock().insert_kprobe(kprobe.clone()); Ok(kprobe) } @@ -51,8 +172,8 @@ pub fn register_kprobe(kprobe_info: KprobeInfo) -> Result, SystemErr /// /// ## 参数 /// - `kprobe`: 已安装的kprobe -pub fn unregister_kprobe(kprobe: Arc) -> Result<(), SystemError> { - let kprobe_addr = kprobe.probe_point().break_address(); +pub fn unregister_kprobe(kprobe: LockKprobe) -> Result<(), SystemError> { + let kprobe_addr = kprobe.read().probe_point().break_address(); KPROBE_MANAGER.lock().remove_kprobe(&kprobe); // 如果没有其他kprobe注册在这个地址上,则删除探测点 if KPROBE_MANAGER.lock().kprobe_num(kprobe_addr) == 0 { diff --git a/kernel/src/debug/kprobe/test.rs b/kernel/src/debug/kprobe/test.rs index b0237dff9..f3e04f496 100644 --- a/kernel/src/debug/kprobe/test.rs +++ b/kernel/src/debug/kprobe/test.rs @@ -1,5 +1,6 @@ use crate::arch::interrupt::TrapFrame; use crate::debug::kprobe::{register_kprobe, unregister_kprobe, KprobeInfo}; +use alloc::string::ToString; use kprobe::ProbeArgs; #[inline(never)] @@ -39,9 +40,11 @@ pub fn kprobe_test() { pre_handler, post_handler, fault_handler: Some(fault_handler), + event_callback: None, symbol: None, addr: Some(detect_func as usize), offset: 0, + enable: true, }; let kprobe = register_kprobe(kprobe_info).unwrap(); @@ -57,9 +60,11 @@ pub fn kprobe_test() { pre_handler: new_pre_handler, post_handler, 
fault_handler: Some(fault_handler), - symbol: Some("dragonos_kernel::debug::kprobe::test::detect_func"), + event_callback: None, + symbol: Some("dragonos_kernel::debug::kprobe::test::detect_func".to_string()), addr: None, offset: 0, + enable: true, }; let kprobe2 = register_kprobe(kprobe_info).unwrap(); println!( diff --git a/kernel/src/exception/debug.rs b/kernel/src/exception/debug.rs index fe9ac7989..118c9ed7b 100644 --- a/kernel/src/exception/debug.rs +++ b/kernel/src/exception/debug.rs @@ -16,10 +16,14 @@ impl DebugException { let pc = frame.debug_address(); if let Some(kprobe_list) = KPROBE_MANAGER.lock().get_debug_list(pc) { for kprobe in kprobe_list { - kprobe.call_post_handler(frame); + let guard = kprobe.read(); + if guard.is_enabled() { + guard.call_post_handler(frame); + guard.call_event_callback(frame); + } } - let probe_point = kprobe_list[0].probe_point(); - clear_single_step(frame, probe_point.return_address()); + let return_address = kprobe_list[0].read().probe_point().return_address(); + clear_single_step(frame, return_address); } else { println!("There is no kprobe on pc {:#x}", pc); } diff --git a/kernel/src/exception/ebreak.rs b/kernel/src/exception/ebreak.rs index 6205f72b5..24adfa137 100644 --- a/kernel/src/exception/ebreak.rs +++ b/kernel/src/exception/ebreak.rs @@ -18,11 +18,14 @@ impl EBreak { let kprobe_list = guard.get_break_list(break_addr); if let Some(kprobe_list) = kprobe_list { for kprobe in kprobe_list { - kprobe.call_pre_handler(frame); + let guard = kprobe.read(); + if guard.is_enabled() { + guard.call_pre_handler(frame); + } } - let probe_point = kprobe_list[0].probe_point(); + let single_step_address = kprobe_list[0].read().probe_point().single_step_address(); // setup_single_step - setup_single_step(frame, probe_point.single_step_address()); + setup_single_step(frame, single_step_address); } else { // For some architectures, they do not support single step execution, // and we need to use breakpoint exceptions to simulate diff --git a/kernel/src/filesystem/vfs/file.rs b/kernel/src/filesystem/vfs/file.rs index e3aff021d..0ccfb350a 100644 --- a/kernel/src/filesystem/vfs/file.rs +++ b/kernel/src/filesystem/vfs/file.rs @@ -10,6 +10,7 @@ use system_error::SystemError; use super::{Dirent, FileType, IndexNode, InodeId, Metadata, SpecialNodeData}; use crate::filesystem::eventfd::EventFdInode; +use crate::perf::PerfEventInode; use crate::{ driver::{ base::{block::SeekFrom, device::DevicePrivateData}, @@ -540,11 +541,15 @@ impl File { inode.inner().lock().remove_epoll(epoll) } _ => { + let inode = self.inode.downcast_ref::(); + if let Some(inode) = inode { + return inode.remove_epoll(epoll); + } let inode = self .inode - .downcast_ref::() + .downcast_ref::() .ok_or(SystemError::ENOSYS)?; - inode.remove_epoll(epoll) + return inode.remove_epoll(epoll); } } } diff --git a/kernel/src/filesystem/vfs/mod.rs b/kernel/src/filesystem/vfs/mod.rs index c8b4a3aee..5482d2f9d 100644 --- a/kernel/src/filesystem/vfs/mod.rs +++ b/kernel/src/filesystem/vfs/mod.rs @@ -119,6 +119,9 @@ bitflags! 
{ } pub trait IndexNode: Any + Sync + Send + Debug + CastFromSync { + fn mmap(&self, _start: usize, _len: usize, _offset: usize) -> Result<(), SystemError> { + return Err(SystemError::ENOSYS); + } /// @brief 打开文件 /// /// @return 成功:Ok() diff --git a/kernel/src/include/bindings/linux_bpf.rs b/kernel/src/include/bindings/linux_bpf.rs new file mode 100644 index 000000000..e3b2b79da --- /dev/null +++ b/kernel/src/include/bindings/linux_bpf.rs @@ -0,0 +1,2430 @@ +/* automatically generated by rust-bindgen 0.69.4 */ + +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +#[repr(C)] +#[derive(Default)] +pub struct __IncompleteArrayField(::core::marker::PhantomData, [T; 0]); +impl __IncompleteArrayField { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::core::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::core::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl ::core::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub const SO_ATTACH_BPF: u32 = 50; +pub const SO_DETACH_BPF: u32 = 27; 
+pub const BPF_LD: u32 = 0; +pub const BPF_LDX: u32 = 1; +pub const BPF_ST: u32 = 2; +pub const BPF_STX: u32 = 3; +pub const BPF_ALU: u32 = 4; +pub const BPF_JMP: u32 = 5; +pub const BPF_W: u32 = 0; +pub const BPF_H: u32 = 8; +pub const BPF_B: u32 = 16; +pub const BPF_K: u32 = 0; +pub const BPF_ALU64: u32 = 7; +pub const BPF_DW: u32 = 24; +pub const BPF_CALL: u32 = 128; +pub const BPF_F_ALLOW_OVERRIDE: u32 = 1; +pub const BPF_F_ALLOW_MULTI: u32 = 2; +pub const BPF_F_REPLACE: u32 = 4; +pub const BPF_F_BEFORE: u32 = 8; +pub const BPF_F_AFTER: u32 = 16; +pub const BPF_F_ID: u32 = 32; +pub const BPF_F_STRICT_ALIGNMENT: u32 = 1; +pub const BPF_F_ANY_ALIGNMENT: u32 = 2; +pub const BPF_F_TEST_RND_HI32: u32 = 4; +pub const BPF_F_TEST_STATE_FREQ: u32 = 8; +pub const BPF_F_SLEEPABLE: u32 = 16; +pub const BPF_F_XDP_HAS_FRAGS: u32 = 32; +pub const BPF_F_XDP_DEV_BOUND_ONLY: u32 = 64; +pub const BPF_F_TEST_REG_INVARIANTS: u32 = 128; +pub const BPF_F_NETFILTER_IP_DEFRAG: u32 = 1; +pub const BPF_PSEUDO_MAP_FD: u32 = 1; +pub const BPF_PSEUDO_MAP_IDX: u32 = 5; +pub const BPF_PSEUDO_MAP_VALUE: u32 = 2; +pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6; +pub const BPF_PSEUDO_BTF_ID: u32 = 3; +pub const BPF_PSEUDO_FUNC: u32 = 4; +pub const BPF_PSEUDO_CALL: u32 = 1; +pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2; +pub const BPF_F_QUERY_EFFECTIVE: u32 = 1; +pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1; +pub const BPF_F_TEST_XDP_LIVE_FRAMES: u32 = 2; +pub const BTF_INT_SIGNED: u32 = 1; +pub const BTF_INT_CHAR: u32 = 2; +pub const BTF_INT_BOOL: u32 = 4; +pub const NLMSG_ALIGNTO: u32 = 4; +pub const XDP_FLAGS_UPDATE_IF_NOEXIST: u32 = 1; +pub const XDP_FLAGS_SKB_MODE: u32 = 2; +pub const XDP_FLAGS_DRV_MODE: u32 = 4; +pub const XDP_FLAGS_HW_MODE: u32 = 8; +pub const XDP_FLAGS_REPLACE: u32 = 16; +pub const XDP_FLAGS_MODES: u32 = 14; +pub const XDP_FLAGS_MASK: u32 = 31; +pub const PERF_MAX_STACK_DEPTH: u32 = 127; +pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8; +pub const PERF_FLAG_FD_NO_GROUP: u32 = 1; +pub const PERF_FLAG_FD_OUTPUT: u32 = 2; +pub const PERF_FLAG_PID_CGROUP: u32 = 4; +pub const PERF_FLAG_FD_CLOEXEC: u32 = 8; +pub const TC_H_MAJ_MASK: u32 = 4294901760; +pub const TC_H_MIN_MASK: u32 = 65535; +pub const TC_H_UNSPEC: u32 = 0; +pub const TC_H_ROOT: u32 = 4294967295; +pub const TC_H_INGRESS: u32 = 4294967281; +pub const TC_H_CLSACT: u32 = 4294967281; +pub const TC_H_MIN_PRIORITY: u32 = 65504; +pub const TC_H_MIN_INGRESS: u32 = 65522; +pub const TC_H_MIN_EGRESS: u32 = 65523; +pub const TCA_BPF_FLAG_ACT_DIRECT: u32 = 1; +pub type __u8 = ::core::ffi::c_uchar; +pub type __s16 = ::core::ffi::c_short; +pub type __u16 = ::core::ffi::c_ushort; +pub type __s32 = ::core::ffi::c_int; +pub type __u32 = ::core::ffi::c_uint; +pub type __s64 = ::core::ffi::c_longlong; +pub type __u64 = ::core::ffi::c_ulonglong; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_insn { + pub code: __u8, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>, + pub off: __s16, + pub imm: __s32, +} +impl bpf_insn { + #[inline] + pub fn dst_reg(&self) -> __u8 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + } + #[inline] + pub fn set_dst_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 4u8, val as u64) + } + } + #[inline] + pub fn src_reg(&self) -> __u8 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + } + #[inline] + pub fn set_src_reg(&mut self, val: __u8) { + unsafe { + let val: u8 = 
::core::mem::transmute(val); + self._bitfield_1.set(4usize, 4u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 4u8, { + let dst_reg: u8 = unsafe { ::core::mem::transmute(dst_reg) }; + dst_reg as u64 + }); + __bindgen_bitfield_unit.set(4usize, 4u8, { + let src_reg: u8 = unsafe { ::core::mem::transmute(src_reg) }; + src_reg as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug)] +pub struct bpf_lpm_trie_key { + pub prefixlen: __u32, + pub data: __IncompleteArrayField<__u8>, +} +impl bpf_cmd { + pub const BPF_PROG_RUN: bpf_cmd = bpf_cmd::BPF_PROG_TEST_RUN; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum bpf_cmd { + BPF_MAP_CREATE = 0, + BPF_MAP_LOOKUP_ELEM = 1, + BPF_MAP_UPDATE_ELEM = 2, + BPF_MAP_DELETE_ELEM = 3, + BPF_MAP_GET_NEXT_KEY = 4, + BPF_PROG_LOAD = 5, + BPF_OBJ_PIN = 6, + BPF_OBJ_GET = 7, + BPF_PROG_ATTACH = 8, + BPF_PROG_DETACH = 9, + BPF_PROG_TEST_RUN = 10, + BPF_PROG_GET_NEXT_ID = 11, + BPF_MAP_GET_NEXT_ID = 12, + BPF_PROG_GET_FD_BY_ID = 13, + BPF_MAP_GET_FD_BY_ID = 14, + BPF_OBJ_GET_INFO_BY_FD = 15, + BPF_PROG_QUERY = 16, + BPF_RAW_TRACEPOINT_OPEN = 17, + BPF_BTF_LOAD = 18, + BPF_BTF_GET_FD_BY_ID = 19, + BPF_TASK_FD_QUERY = 20, + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21, + BPF_MAP_FREEZE = 22, + BPF_BTF_GET_NEXT_ID = 23, + BPF_MAP_LOOKUP_BATCH = 24, + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25, + BPF_MAP_UPDATE_BATCH = 26, + BPF_MAP_DELETE_BATCH = 27, + BPF_LINK_CREATE = 28, + BPF_LINK_UPDATE = 29, + BPF_LINK_GET_FD_BY_ID = 30, + BPF_LINK_GET_NEXT_ID = 31, + BPF_ENABLE_STATS = 32, + BPF_ITER_CREATE = 33, + BPF_LINK_DETACH = 34, + BPF_PROG_BIND_MAP = 35, + BPF_TOKEN_CREATE = 36, + __MAX_BPF_CMD = 37, +} +impl bpf_map_type { + pub const BPF_MAP_TYPE_CGROUP_STORAGE: bpf_map_type = + bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED; +} +impl bpf_map_type { + pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: bpf_map_type = + bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, + BPF_MAP_TYPE_BLOOM_FILTER = 30, + BPF_MAP_TYPE_USER_RINGBUF = 31, + BPF_MAP_TYPE_CGRP_STORAGE = 32, + BPF_MAP_TYPE_ARENA = 33, + __MAX_BPF_MAP_TYPE = 34, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum bpf_prog_type { + 
BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, + BPF_PROG_TYPE_NETFILTER = 32, + __MAX_BPF_PROG_TYPE = 33, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + BPF_PERF_EVENT = 41, + BPF_TRACE_KPROBE_MULTI = 42, + BPF_LSM_CGROUP = 43, + BPF_STRUCT_OPS = 44, + BPF_NETFILTER = 45, + BPF_TCX_INGRESS = 46, + BPF_TCX_EGRESS = 47, + BPF_TRACE_UPROBE_MULTI = 48, + BPF_CGROUP_UNIX_CONNECT = 49, + BPF_CGROUP_UNIX_SENDMSG = 50, + BPF_CGROUP_UNIX_RECVMSG = 51, + BPF_CGROUP_UNIX_GETPEERNAME = 52, + BPF_CGROUP_UNIX_GETSOCKNAME = 53, + BPF_NETKIT_PRIMARY = 54, + BPF_NETKIT_PEER = 55, + __MAX_BPF_ATTACH_TYPE = 56, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, + BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, + BPF_LINK_TYPE_STRUCT_OPS = 9, + BPF_LINK_TYPE_NETFILTER = 10, + BPF_LINK_TYPE_TCX = 11, + BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, + __MAX_BPF_LINK_TYPE = 14, +} +pub const BPF_F_KPROBE_MULTI_RETURN: _bindgen_ty_2 = 1; +pub type _bindgen_ty_2 = ::core::ffi::c_uint; +pub const BPF_F_UPROBE_MULTI_RETURN: 
_bindgen_ty_3 = 1; +pub type _bindgen_ty_3 = ::core::ffi::c_uint; +pub const BPF_ANY: _bindgen_ty_4 = 0; +pub const BPF_NOEXIST: _bindgen_ty_4 = 1; +pub const BPF_EXIST: _bindgen_ty_4 = 2; +pub const BPF_F_LOCK: _bindgen_ty_4 = 4; +pub type _bindgen_ty_4 = ::core::ffi::c_uint; +pub const BPF_F_NO_PREALLOC: _bindgen_ty_5 = 1; +pub const BPF_F_NO_COMMON_LRU: _bindgen_ty_5 = 2; +pub const BPF_F_NUMA_NODE: _bindgen_ty_5 = 4; +pub const BPF_F_RDONLY: _bindgen_ty_5 = 8; +pub const BPF_F_WRONLY: _bindgen_ty_5 = 16; +pub const BPF_F_STACK_BUILD_ID: _bindgen_ty_5 = 32; +pub const BPF_F_ZERO_SEED: _bindgen_ty_5 = 64; +pub const BPF_F_RDONLY_PROG: _bindgen_ty_5 = 128; +pub const BPF_F_WRONLY_PROG: _bindgen_ty_5 = 256; +pub const BPF_F_CLONE: _bindgen_ty_5 = 512; +pub const BPF_F_MMAPABLE: _bindgen_ty_5 = 1024; +pub const BPF_F_PRESERVE_ELEMS: _bindgen_ty_5 = 2048; +pub const BPF_F_INNER_MAP: _bindgen_ty_5 = 4096; +pub const BPF_F_LINK: _bindgen_ty_5 = 8192; +pub const BPF_F_PATH_FD: _bindgen_ty_5 = 16384; +pub const BPF_F_VTYPE_BTF_OBJ_FD: _bindgen_ty_5 = 32768; +pub const BPF_F_TOKEN_FD: _bindgen_ty_5 = 65536; +pub const BPF_F_SEGV_ON_FAULT: _bindgen_ty_5 = 131072; +pub const BPF_F_NO_USER_CONV: _bindgen_ty_5 = 262144; +pub type _bindgen_ty_5 = ::core::ffi::c_uint; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum bpf_stats_type { + BPF_STATS_RUN_TIME = 0, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_1, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_2, + pub batch: bpf_attr__bindgen_ty_3, + pub __bindgen_anon_3: bpf_attr__bindgen_ty_4, + pub __bindgen_anon_4: bpf_attr__bindgen_ty_5, + pub __bindgen_anon_5: bpf_attr__bindgen_ty_6, + pub test: bpf_attr__bindgen_ty_7, + pub __bindgen_anon_6: bpf_attr__bindgen_ty_8, + pub info: bpf_attr__bindgen_ty_9, + pub query: bpf_attr__bindgen_ty_10, + pub raw_tracepoint: bpf_attr__bindgen_ty_11, + pub __bindgen_anon_7: bpf_attr__bindgen_ty_12, + pub task_fd_query: bpf_attr__bindgen_ty_13, + pub link_create: bpf_attr__bindgen_ty_14, + pub link_update: bpf_attr__bindgen_ty_15, + pub link_detach: bpf_attr__bindgen_ty_16, + pub enable_stats: bpf_attr__bindgen_ty_17, + pub iter_create: bpf_attr__bindgen_ty_18, + pub prog_bind_map: bpf_attr__bindgen_ty_19, + pub token_create: bpf_attr__bindgen_ty_20, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_1 { + pub map_type: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub inner_map_fd: __u32, + pub numa_node: __u32, + pub map_name: [::core::ffi::c_char; 16usize], + pub map_ifindex: __u32, + pub btf_fd: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub map_extra: __u64, + pub value_type_btf_obj_fd: __s32, + pub map_token_fd: __s32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_2 { + pub map_fd: __u32, + pub key: __u64, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 { + pub value: __u64, + pub next_key: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_3 { + pub in_batch: __u64, + pub out_batch: __u64, + pub keys: __u64, + pub values: __u64, + pub count: __u32, + pub map_fd: __u32, + pub elem_flags: __u64, + pub flags: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_4 { + pub 
prog_type: __u32, + pub insn_cnt: __u32, + pub insns: __u64, + pub license: __u64, + pub log_level: __u32, + pub log_size: __u32, + pub log_buf: __u64, + pub kern_version: __u32, + pub prog_flags: __u32, + pub prog_name: [::core::ffi::c_char; 16usize], + pub prog_ifindex: __u32, + pub expected_attach_type: __u32, + pub prog_btf_fd: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub func_info_cnt: __u32, + pub line_info_rec_size: __u32, + pub line_info: __u64, + pub line_info_cnt: __u32, + pub attach_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1, + pub core_relo_cnt: __u32, + pub fd_array: __u64, + pub core_relos: __u64, + pub core_relo_rec_size: __u32, + pub log_true_size: __u32, + pub prog_token_fd: __s32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 { + pub attach_prog_fd: __u32, + pub attach_btf_obj_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_5 { + pub pathname: __u64, + pub bpf_fd: __u32, + pub file_flags: __u32, + pub path_fd: __s32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_6 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_6__bindgen_ty_1, + pub attach_bpf_fd: __u32, + pub attach_type: __u32, + pub attach_flags: __u32, + pub replace_bpf_fd: __u32, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_6__bindgen_ty_2, + pub expected_revision: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_6__bindgen_ty_1 { + pub target_fd: __u32, + pub target_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_6__bindgen_ty_2 { + pub relative_fd: __u32, + pub relative_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_7 { + pub prog_fd: __u32, + pub retval: __u32, + pub data_size_in: __u32, + pub data_size_out: __u32, + pub data_in: __u64, + pub data_out: __u64, + pub repeat: __u32, + pub duration: __u32, + pub ctx_size_in: __u32, + pub ctx_size_out: __u32, + pub ctx_in: __u64, + pub ctx_out: __u64, + pub flags: __u32, + pub cpu: __u32, + pub batch_size: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_8 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1, + pub next_id: __u32, + pub open_flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 { + pub start_id: __u32, + pub prog_id: __u32, + pub map_id: __u32, + pub btf_id: __u32, + pub link_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_9 { + pub bpf_fd: __u32, + pub info_len: __u32, + pub info: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_10 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_10__bindgen_ty_1, + pub attach_type: __u32, + pub query_flags: __u32, + pub attach_flags: __u32, + pub prog_ids: __u64, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_10__bindgen_ty_2, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub prog_attach_flags: __u64, + pub link_ids: __u64, + pub link_attach_flags: __u64, + pub revision: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_10__bindgen_ty_1 { + pub target_fd: __u32, + pub target_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_10__bindgen_ty_2 { + pub prog_cnt: __u32, + pub count: __u32, +} +impl bpf_attr__bindgen_ty_10 { + #[inline] + pub fn new_bitfield_1() -> 
__BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_11 { + pub name: __u64, + pub prog_fd: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub cookie: __u64, +} +impl bpf_attr__bindgen_ty_11 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_12 { + pub btf: __u64, + pub btf_log_buf: __u64, + pub btf_size: __u32, + pub btf_log_size: __u32, + pub btf_log_level: __u32, + pub btf_log_true_size: __u32, + pub btf_flags: __u32, + pub btf_token_fd: __s32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_13 { + pub pid: __u32, + pub fd: __u32, + pub flags: __u32, + pub buf_len: __u32, + pub buf: __u64, + pub prog_id: __u32, + pub fd_type: __u32, + pub probe_offset: __u64, + pub probe_addr: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2, + pub attach_type: __u32, + pub flags: __u32, + pub __bindgen_anon_3: bpf_attr__bindgen_ty_14__bindgen_ty_3, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 { + pub prog_fd: __u32, + pub map_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 { + pub target_fd: __u32, + pub target_ifindex: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_3 { + pub target_btf_id: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1, + pub perf_event: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2, + pub kprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3, + pub tracing: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4, + pub netfilter: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5, + pub tcx: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6, + pub uprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7, + pub netkit: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1 { + pub iter_info: __u64, + pub iter_info_len: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2 { + pub bpf_cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3 { + pub flags: __u32, + pub cnt: __u32, + pub syms: __u64, + pub addrs: __u64, + pub cookies: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4 { + pub target_btf_id: __u32, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5 { + pub pf: __u32, + pub hooknum: __u32, + pub priority: __s32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1, + pub expected_revision: 
__u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1 { + pub relative_fd: __u32, + pub relative_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7 { + pub path: __u64, + pub offsets: __u64, + pub ref_ctr_offsets: __u64, + pub cookies: __u64, + pub cnt: __u32, + pub flags: __u32, + pub pid: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8 { + pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1, + pub expected_revision: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1 { + pub relative_fd: __u32, + pub relative_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_attr__bindgen_ty_15 { + pub link_fd: __u32, + pub __bindgen_anon_1: bpf_attr__bindgen_ty_15__bindgen_ty_1, + pub flags: __u32, + pub __bindgen_anon_2: bpf_attr__bindgen_ty_15__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_15__bindgen_ty_1 { + pub new_prog_fd: __u32, + pub new_map_fd: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_attr__bindgen_ty_15__bindgen_ty_2 { + pub old_prog_fd: __u32, + pub old_map_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_16 { + pub link_fd: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_17 { + pub type_: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_18 { + pub link_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_19 { + pub prog_fd: __u32, + pub map_fd: __u32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_attr__bindgen_ty_20 { + pub flags: __u32, + pub bpffs_fd: __u32, +} +pub const BPF_F_RECOMPUTE_CSUM: _bindgen_ty_6 = 1; +pub const BPF_F_INVALIDATE_HASH: _bindgen_ty_6 = 2; +pub type _bindgen_ty_6 = ::core::ffi::c_uint; +pub const BPF_F_HDR_FIELD_MASK: _bindgen_ty_7 = 15; +pub type _bindgen_ty_7 = ::core::ffi::c_uint; +pub const BPF_F_PSEUDO_HDR: _bindgen_ty_8 = 16; +pub const BPF_F_MARK_MANGLED_0: _bindgen_ty_8 = 32; +pub const BPF_F_MARK_ENFORCE: _bindgen_ty_8 = 64; +pub type _bindgen_ty_8 = ::core::ffi::c_uint; +pub const BPF_F_INGRESS: _bindgen_ty_9 = 1; +pub type _bindgen_ty_9 = ::core::ffi::c_uint; +pub const BPF_F_TUNINFO_IPV6: _bindgen_ty_10 = 1; +pub type _bindgen_ty_10 = ::core::ffi::c_uint; +pub const BPF_F_SKIP_FIELD_MASK: _bindgen_ty_11 = 255; +pub const BPF_F_USER_STACK: _bindgen_ty_11 = 256; +pub const BPF_F_FAST_STACK_CMP: _bindgen_ty_11 = 512; +pub const BPF_F_REUSE_STACKID: _bindgen_ty_11 = 1024; +pub const BPF_F_USER_BUILD_ID: _bindgen_ty_11 = 2048; +pub type _bindgen_ty_11 = ::core::ffi::c_uint; +pub const BPF_F_ZERO_CSUM_TX: _bindgen_ty_12 = 2; +pub const BPF_F_DONT_FRAGMENT: _bindgen_ty_12 = 4; +pub const BPF_F_SEQ_NUMBER: _bindgen_ty_12 = 8; +pub const BPF_F_NO_TUNNEL_KEY: _bindgen_ty_12 = 16; +pub type _bindgen_ty_12 = ::core::ffi::c_uint; +pub const BPF_F_TUNINFO_FLAGS: _bindgen_ty_13 = 16; +pub type _bindgen_ty_13 = ::core::ffi::c_uint; +pub const BPF_F_INDEX_MASK: _bindgen_ty_14 = 4294967295; +pub const BPF_F_CURRENT_CPU: _bindgen_ty_14 = 4294967295; +pub const BPF_F_CTXLEN_MASK: _bindgen_ty_14 = 4503595332403200; +pub type _bindgen_ty_14 = ::core::ffi::c_ulong; +pub const 
BPF_F_CURRENT_NETNS: _bindgen_ty_15 = -1; +pub type _bindgen_ty_15 = ::core::ffi::c_int; +pub const BPF_F_ADJ_ROOM_FIXED_GSO: _bindgen_ty_17 = 1; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: _bindgen_ty_17 = 2; +pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: _bindgen_ty_17 = 4; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: _bindgen_ty_17 = 8; +pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: _bindgen_ty_17 = 16; +pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: _bindgen_ty_17 = 32; +pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: _bindgen_ty_17 = 64; +pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV4: _bindgen_ty_17 = 128; +pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV6: _bindgen_ty_17 = 256; +pub type _bindgen_ty_17 = ::core::ffi::c_uint; +pub const BPF_F_SYSCTL_BASE_NAME: _bindgen_ty_19 = 1; +pub type _bindgen_ty_19 = ::core::ffi::c_uint; +pub const BPF_F_GET_BRANCH_RECORDS_SIZE: _bindgen_ty_21 = 1; +pub type _bindgen_ty_21 = ::core::ffi::c_uint; +pub const BPF_RINGBUF_BUSY_BIT: _bindgen_ty_24 = 2147483648; +pub const BPF_RINGBUF_DISCARD_BIT: _bindgen_ty_24 = 1073741824; +pub const BPF_RINGBUF_HDR_SZ: _bindgen_ty_24 = 8; +pub type _bindgen_ty_24 = ::core::ffi::c_uint; +pub const BPF_F_BPRM_SECUREEXEC: _bindgen_ty_26 = 1; +pub type _bindgen_ty_26 = ::core::ffi::c_uint; +pub const BPF_F_BROADCAST: _bindgen_ty_27 = 8; +pub const BPF_F_EXCLUDE_INGRESS: _bindgen_ty_27 = 16; +pub type _bindgen_ty_27 = ::core::ffi::c_uint; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_devmap_val { + pub ifindex: __u32, + pub bpf_prog: bpf_devmap_val__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_devmap_val__bindgen_ty_1 { + pub fd: ::core::ffi::c_int, + pub id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_cpumap_val { + pub qsize: __u32, + pub bpf_prog: bpf_cpumap_val__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_cpumap_val__bindgen_ty_1 { + pub fd: ::core::ffi::c_int, + pub id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_prog_info { + pub type_: __u32, + pub id: __u32, + pub tag: [__u8; 8usize], + pub jited_prog_len: __u32, + pub xlated_prog_len: __u32, + pub jited_prog_insns: __u64, + pub xlated_prog_insns: __u64, + pub load_time: __u64, + pub created_by_uid: __u32, + pub nr_map_ids: __u32, + pub map_ids: __u64, + pub name: [::core::ffi::c_char; 16usize], + pub ifindex: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub nr_jited_ksyms: __u32, + pub nr_jited_func_lens: __u32, + pub jited_ksyms: __u64, + pub jited_func_lens: __u64, + pub btf_id: __u32, + pub func_info_rec_size: __u32, + pub func_info: __u64, + pub nr_func_info: __u32, + pub nr_line_info: __u32, + pub line_info: __u64, + pub jited_line_info: __u64, + pub nr_jited_line_info: __u32, + pub line_info_rec_size: __u32, + pub jited_line_info_rec_size: __u32, + pub nr_prog_tags: __u32, + pub prog_tags: __u64, + pub run_time_ns: __u64, + pub run_cnt: __u64, + pub recursion_misses: __u64, + pub verified_insns: __u32, + pub attach_btf_obj_id: __u32, + pub attach_btf_id: __u32, +} +impl bpf_prog_info { + #[inline] + pub fn gpl_compatible(&self) -> __u32 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + } + #[inline] + pub fn set_gpl_compatible(&mut self, val: __u32) { + unsafe { + let val: u32 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut 
__bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let gpl_compatible: u32 = unsafe { ::core::mem::transmute(gpl_compatible) }; + gpl_compatible as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_map_info { + pub type_: __u32, + pub id: __u32, + pub key_size: __u32, + pub value_size: __u32, + pub max_entries: __u32, + pub map_flags: __u32, + pub name: [::core::ffi::c_char; 16usize], + pub ifindex: __u32, + pub btf_vmlinux_value_type_id: __u32, + pub netns_dev: __u64, + pub netns_ino: __u64, + pub btf_id: __u32, + pub btf_key_type_id: __u32, + pub btf_value_type_id: __u32, + pub btf_vmlinux_id: __u32, + pub map_extra: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_btf_info { + pub btf: __u64, + pub btf_size: __u32, + pub id: __u32, + pub name: __u64, + pub name_len: __u32, + pub kernel_btf: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info { + pub type_: __u32, + pub id: __u32, + pub prog_id: __u32, + pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1 { + pub raw_tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_1, + pub tracing: bpf_link_info__bindgen_ty_1__bindgen_ty_2, + pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_3, + pub iter: bpf_link_info__bindgen_ty_1__bindgen_ty_4, + pub netns: bpf_link_info__bindgen_ty_1__bindgen_ty_5, + pub xdp: bpf_link_info__bindgen_ty_1__bindgen_ty_6, + pub struct_ops: bpf_link_info__bindgen_ty_1__bindgen_ty_7, + pub netfilter: bpf_link_info__bindgen_ty_1__bindgen_ty_8, + pub kprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_9, + pub uprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_10, + pub perf_event: bpf_link_info__bindgen_ty_1__bindgen_ty_11, + pub tcx: bpf_link_info__bindgen_ty_1__bindgen_ty_12, + pub netkit: bpf_link_info__bindgen_ty_1__bindgen_ty_13, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_1 { + pub tp_name: __u64, + pub tp_name_len: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_2 { + pub attach_type: __u32, + pub target_obj_id: __u32, + pub target_btf_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_3 { + pub cgroup_id: __u64, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4 { + pub target_name: __u64, + pub target_name_len: __u32, + pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1, + pub __bindgen_anon_2: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1 { + pub map: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 { + pub map_id: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2 { + pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1, + pub task: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1 { + pub cgroup_id: 
__u64, + pub order: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2 { + pub tid: __u32, + pub pid: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_5 { + pub netns_ino: __u32, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_6 { + pub ifindex: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_7 { + pub map_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_8 { + pub pf: __u32, + pub hooknum: __u32, + pub priority: __s32, + pub flags: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_9 { + pub addrs: __u64, + pub count: __u32, + pub flags: __u32, + pub missed: __u64, + pub cookies: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_10 { + pub path: __u64, + pub offsets: __u64, + pub ref_ctr_offsets: __u64, + pub cookies: __u64, + pub path_size: __u32, + pub count: __u32, + pub flags: __u32, + pub pid: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11 { + pub type_: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1 { + pub uprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1, + pub kprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2, + pub tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3, + pub event: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1 { + pub file_name: __u64, + pub name_len: __u32, + pub offset: __u32, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2 { + pub func_name: __u64, + pub name_len: __u32, + pub offset: __u32, + pub addr: __u64, + pub missed: __u64, + pub cookie: __u64, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 { + pub tp_name: __u64, + pub name_len: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub cookie: __u64, +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 { + pub config: __u64, + pub type_: __u32, + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, + pub cookie: __u64, +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: 
__BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +impl bpf_link_info__bindgen_ty_1__bindgen_ty_11 { + #[inline] + pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_12 { + pub ifindex: __u32, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_13 { + pub ifindex: __u32, + pub attach_type: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_func_info { + pub insn_off: __u32, + pub type_id: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct bpf_line_info { + pub insn_off: __u32, + pub file_name_off: __u32, + pub line_off: __u32, + pub line_col: __u32, +} +pub const BPF_F_TIMER_ABS: _bindgen_ty_41 = 1; +pub const BPF_F_TIMER_CPU_PIN: _bindgen_ty_41 = 2; +pub type _bindgen_ty_41 = ::core::ffi::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_header { + pub magic: __u16, + pub version: __u8, + pub flags: __u8, + pub hdr_len: __u32, + pub type_off: __u32, + pub type_len: __u32, + pub str_off: __u32, + pub str_len: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct btf_type { + pub name_off: __u32, + pub info: __u32, + pub __bindgen_anon_1: btf_type__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union btf_type__bindgen_ty_1 { + pub size: __u32, + pub type_: __u32, +} +pub const BTF_KIND_UNKN: _bindgen_ty_42 = 0; +pub const BTF_KIND_INT: _bindgen_ty_42 = 1; +pub const BTF_KIND_PTR: _bindgen_ty_42 = 2; +pub const BTF_KIND_ARRAY: _bindgen_ty_42 = 3; +pub const BTF_KIND_STRUCT: _bindgen_ty_42 = 4; +pub const BTF_KIND_UNION: _bindgen_ty_42 = 5; +pub const BTF_KIND_ENUM: _bindgen_ty_42 = 6; +pub const BTF_KIND_FWD: _bindgen_ty_42 = 7; +pub const BTF_KIND_TYPEDEF: _bindgen_ty_42 = 8; +pub const BTF_KIND_VOLATILE: _bindgen_ty_42 = 9; +pub const BTF_KIND_CONST: _bindgen_ty_42 = 10; +pub const BTF_KIND_RESTRICT: _bindgen_ty_42 = 11; +pub const BTF_KIND_FUNC: _bindgen_ty_42 = 12; +pub const BTF_KIND_FUNC_PROTO: _bindgen_ty_42 = 13; +pub const BTF_KIND_VAR: _bindgen_ty_42 = 14; +pub const BTF_KIND_DATASEC: _bindgen_ty_42 = 15; +pub const BTF_KIND_FLOAT: _bindgen_ty_42 = 16; +pub const BTF_KIND_DECL_TAG: _bindgen_ty_42 = 17; +pub const BTF_KIND_TYPE_TAG: _bindgen_ty_42 = 18; +pub const BTF_KIND_ENUM64: _bindgen_ty_42 = 19; +pub const NR_BTF_KINDS: _bindgen_ty_42 = 20; +pub const BTF_KIND_MAX: _bindgen_ty_42 = 19; +pub type _bindgen_ty_42 = ::core::ffi::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_enum { + pub name_off: __u32, + pub val: __s32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_array { + pub type_: __u32, + pub index_type: __u32, + pub nelems: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_member { + pub name_off: __u32, + pub type_: __u32, + pub offset: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_param { + pub name_off: __u32, + pub type_: __u32, +} +pub const BTF_VAR_STATIC: _bindgen_ty_43 = 0; +pub const BTF_VAR_GLOBAL_ALLOCATED: _bindgen_ty_43 = 1; +pub const BTF_VAR_GLOBAL_EXTERN: _bindgen_ty_43 = 2; +pub type _bindgen_ty_43 = ::core::ffi::c_uint; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum btf_func_linkage { + BTF_FUNC_STATIC = 0, + BTF_FUNC_GLOBAL = 1, + 
BTF_FUNC_EXTERN = 2, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var { + pub linkage: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_var_secinfo { + pub type_: __u32, + pub offset: __u32, + pub size: __u32, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct btf_decl_tag { + pub component_idx: __s32, +} +pub const IFLA_XDP_UNSPEC: _bindgen_ty_92 = 0; +pub const IFLA_XDP_FD: _bindgen_ty_92 = 1; +pub const IFLA_XDP_ATTACHED: _bindgen_ty_92 = 2; +pub const IFLA_XDP_FLAGS: _bindgen_ty_92 = 3; +pub const IFLA_XDP_PROG_ID: _bindgen_ty_92 = 4; +pub const IFLA_XDP_DRV_PROG_ID: _bindgen_ty_92 = 5; +pub const IFLA_XDP_SKB_PROG_ID: _bindgen_ty_92 = 6; +pub const IFLA_XDP_HW_PROG_ID: _bindgen_ty_92 = 7; +pub const IFLA_XDP_EXPECTED_FD: _bindgen_ty_92 = 8; +pub const __IFLA_XDP_MAX: _bindgen_ty_92 = 9; +pub type _bindgen_ty_92 = ::core::ffi::c_uint; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum perf_type_id { + PERF_TYPE_HARDWARE = 0, + PERF_TYPE_SOFTWARE = 1, + PERF_TYPE_TRACEPOINT = 2, + PERF_TYPE_HW_CACHE = 3, + PERF_TYPE_RAW = 4, + PERF_TYPE_BREAKPOINT = 5, + PERF_TYPE_MAX = 6, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_id { + PERF_COUNT_HW_CPU_CYCLES = 0, + PERF_COUNT_HW_INSTRUCTIONS = 1, + PERF_COUNT_HW_CACHE_REFERENCES = 2, + PERF_COUNT_HW_CACHE_MISSES = 3, + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, + PERF_COUNT_HW_BRANCH_MISSES = 5, + PERF_COUNT_HW_BUS_CYCLES = 6, + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, + PERF_COUNT_HW_REF_CPU_CYCLES = 9, + PERF_COUNT_HW_MAX = 10, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_id { + PERF_COUNT_HW_CACHE_L1D = 0, + PERF_COUNT_HW_CACHE_L1I = 1, + PERF_COUNT_HW_CACHE_LL = 2, + PERF_COUNT_HW_CACHE_DTLB = 3, + PERF_COUNT_HW_CACHE_ITLB = 4, + PERF_COUNT_HW_CACHE_BPU = 5, + PERF_COUNT_HW_CACHE_NODE = 6, + PERF_COUNT_HW_CACHE_MAX = 7, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_op_id { + PERF_COUNT_HW_CACHE_OP_READ = 0, + PERF_COUNT_HW_CACHE_OP_WRITE = 1, + PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, + PERF_COUNT_HW_CACHE_OP_MAX = 3, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum perf_hw_cache_op_result_id { + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, + PERF_COUNT_HW_CACHE_RESULT_MISS = 1, + PERF_COUNT_HW_CACHE_RESULT_MAX = 2, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum perf_sw_ids { + PERF_COUNT_SW_CPU_CLOCK = 0, + PERF_COUNT_SW_TASK_CLOCK = 1, + PERF_COUNT_SW_PAGE_FAULTS = 2, + PERF_COUNT_SW_CONTEXT_SWITCHES = 3, + PERF_COUNT_SW_CPU_MIGRATIONS = 4, + PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, + PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, + PERF_COUNT_SW_EMULATION_FAULTS = 8, + PERF_COUNT_SW_DUMMY = 9, + PERF_COUNT_SW_BPF_OUTPUT = 10, + PERF_COUNT_SW_CGROUP_SWITCHES = 11, + PERF_COUNT_SW_MAX = 12, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, FromPrimitive)] +pub enum perf_event_sample_format { + PERF_SAMPLE_IP = 1, + PERF_SAMPLE_TID = 2, + PERF_SAMPLE_TIME = 4, + PERF_SAMPLE_ADDR = 8, + PERF_SAMPLE_READ = 16, + PERF_SAMPLE_CALLCHAIN = 32, + PERF_SAMPLE_ID = 64, + PERF_SAMPLE_CPU = 128, + PERF_SAMPLE_PERIOD = 256, + PERF_SAMPLE_STREAM_ID = 512, + PERF_SAMPLE_RAW = 1024, + PERF_SAMPLE_BRANCH_STACK = 2048, + PERF_SAMPLE_REGS_USER = 4096, + PERF_SAMPLE_STACK_USER = 8192, + 
PERF_SAMPLE_WEIGHT = 16384, + PERF_SAMPLE_DATA_SRC = 32768, + PERF_SAMPLE_IDENTIFIER = 65536, + PERF_SAMPLE_TRANSACTION = 131072, + PERF_SAMPLE_REGS_INTR = 262144, + PERF_SAMPLE_PHYS_ADDR = 524288, + PERF_SAMPLE_AUX = 1048576, + PERF_SAMPLE_CGROUP = 2097152, + PERF_SAMPLE_DATA_PAGE_SIZE = 4194304, + PERF_SAMPLE_CODE_PAGE_SIZE = 8388608, + PERF_SAMPLE_WEIGHT_STRUCT = 16777216, + PERF_SAMPLE_MAX = 33554432, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_attr { + pub type_: __u32, + pub size: __u32, + pub config: __u64, + pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1, + pub sample_type: __u64, + pub read_format: __u64, + pub _bitfield_align_1: [u32; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2, + pub bp_type: __u32, + pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3, + pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4, + pub branch_sample_type: __u64, + pub sample_regs_user: __u64, + pub sample_stack_user: __u32, + pub clockid: __s32, + pub sample_regs_intr: __u64, + pub aux_watermark: __u32, + pub sample_max_stack: __u16, + pub __reserved_2: __u16, + pub aux_sample_size: __u32, + pub __reserved_3: __u32, + pub sig_data: __u64, + pub config3: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_1 { + pub sample_period: __u64, + pub sample_freq: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_2 { + pub wakeup_events: __u32, + pub wakeup_watermark: __u32, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_3 { + pub bp_addr: __u64, + pub kprobe_func: __u64, + pub uprobe_path: __u64, + pub config1: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_attr__bindgen_ty_4 { + pub bp_len: __u64, + pub kprobe_addr: __u64, + pub probe_offset: __u64, + pub config2: __u64, +} +impl perf_event_attr { + #[inline] + pub fn disabled(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_disabled(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn pinned(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_pinned(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclusive(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclusive(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_user(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_kernel(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 
1u8) as u64) } + } + #[inline] + pub fn set_exclude_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_hv(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_hv(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(6usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_idle(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_idle(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(7usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(8usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(9usize, 1u8, val as u64) + } + } + #[inline] + pub fn freq(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) } + } + #[inline] + pub fn set_freq(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(10usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit_stat(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit_stat(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(11usize, 1u8, val as u64) + } + } + #[inline] + pub fn enable_on_exec(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) } + } + #[inline] + pub fn set_enable_on_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(12usize, 1u8, val as u64) + } + } + #[inline] + pub fn task(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) } + } + #[inline] + pub fn set_task(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(13usize, 1u8, val as u64) + } + } + #[inline] + pub fn watermark(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) } + } + #[inline] + pub fn set_watermark(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(14usize, 1u8, val as u64) + } + } + #[inline] + pub fn precise_ip(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) } + } + #[inline] + pub fn set_precise_ip(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(15usize, 2u8, val as u64) + } + } + #[inline] + pub fn mmap_data(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap_data(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(17usize, 1u8, val as u64) + } + } + #[inline] + 
pub fn sample_id_all(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) } + } + #[inline] + pub fn set_sample_id_all(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(18usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_host(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_host(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(19usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_guest(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_guest(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(20usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_kernel(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_kernel(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(21usize, 1u8, val as u64) + } + } + #[inline] + pub fn exclude_callchain_user(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) } + } + #[inline] + pub fn set_exclude_callchain_user(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(22usize, 1u8, val as u64) + } + } + #[inline] + pub fn mmap2(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) } + } + #[inline] + pub fn set_mmap2(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(23usize, 1u8, val as u64) + } + } + #[inline] + pub fn comm_exec(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) } + } + #[inline] + pub fn set_comm_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(24usize, 1u8, val as u64) + } + } + #[inline] + pub fn use_clockid(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) } + } + #[inline] + pub fn set_use_clockid(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(25usize, 1u8, val as u64) + } + } + #[inline] + pub fn context_switch(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) } + } + #[inline] + pub fn set_context_switch(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(26usize, 1u8, val as u64) + } + } + #[inline] + pub fn write_backward(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) } + } + #[inline] + pub fn set_write_backward(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(27usize, 1u8, val as u64) + } + } + #[inline] + pub fn namespaces(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) } + } + #[inline] + pub fn set_namespaces(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(28usize, 1u8, val as u64) + } + } + #[inline] + pub fn ksymbol(&self) -> __u64 { + unsafe { 
::core::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) } + } + #[inline] + pub fn set_ksymbol(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(29usize, 1u8, val as u64) + } + } + #[inline] + pub fn bpf_event(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) } + } + #[inline] + pub fn set_bpf_event(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(30usize, 1u8, val as u64) + } + } + #[inline] + pub fn aux_output(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) } + } + #[inline] + pub fn set_aux_output(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(31usize, 1u8, val as u64) + } + } + #[inline] + pub fn cgroup(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) } + } + #[inline] + pub fn set_cgroup(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(32usize, 1u8, val as u64) + } + } + #[inline] + pub fn text_poke(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) } + } + #[inline] + pub fn set_text_poke(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(33usize, 1u8, val as u64) + } + } + #[inline] + pub fn build_id(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u64) } + } + #[inline] + pub fn set_build_id(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(34usize, 1u8, val as u64) + } + } + #[inline] + pub fn inherit_thread(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u64) } + } + #[inline] + pub fn set_inherit_thread(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(35usize, 1u8, val as u64) + } + } + #[inline] + pub fn remove_on_exec(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u64) } + } + #[inline] + pub fn set_remove_on_exec(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(36usize, 1u8, val as u64) + } + } + #[inline] + pub fn sigtrap(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u64) } + } + #[inline] + pub fn set_sigtrap(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(37usize, 1u8, val as u64) + } + } + #[inline] + pub fn __reserved_1(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(38usize, 26u8) as u64) } + } + #[inline] + pub fn set___reserved_1(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(38usize, 26u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + disabled: __u64, + inherit: __u64, + pinned: __u64, + exclusive: __u64, + exclude_user: __u64, + exclude_kernel: __u64, + exclude_hv: __u64, + exclude_idle: __u64, + mmap: __u64, + comm: __u64, + freq: __u64, + inherit_stat: __u64, + enable_on_exec: __u64, + task: __u64, + watermark: __u64, + precise_ip: __u64, + mmap_data: __u64, + sample_id_all: __u64, + exclude_host: __u64, + exclude_guest: __u64, + exclude_callchain_kernel: __u64, + exclude_callchain_user: __u64, + mmap2: __u64, + 
comm_exec: __u64, + use_clockid: __u64, + context_switch: __u64, + write_backward: __u64, + namespaces: __u64, + ksymbol: __u64, + bpf_event: __u64, + aux_output: __u64, + cgroup: __u64, + text_poke: __u64, + build_id: __u64, + inherit_thread: __u64, + remove_on_exec: __u64, + sigtrap: __u64, + __reserved_1: __u64, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let disabled: u64 = unsafe { ::core::mem::transmute(disabled) }; + disabled as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let inherit: u64 = unsafe { ::core::mem::transmute(inherit) }; + inherit as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let pinned: u64 = unsafe { ::core::mem::transmute(pinned) }; + pinned as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let exclusive: u64 = unsafe { ::core::mem::transmute(exclusive) }; + exclusive as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let exclude_user: u64 = unsafe { ::core::mem::transmute(exclude_user) }; + exclude_user as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let exclude_kernel: u64 = unsafe { ::core::mem::transmute(exclude_kernel) }; + exclude_kernel as u64 + }); + __bindgen_bitfield_unit.set(6usize, 1u8, { + let exclude_hv: u64 = unsafe { ::core::mem::transmute(exclude_hv) }; + exclude_hv as u64 + }); + __bindgen_bitfield_unit.set(7usize, 1u8, { + let exclude_idle: u64 = unsafe { ::core::mem::transmute(exclude_idle) }; + exclude_idle as u64 + }); + __bindgen_bitfield_unit.set(8usize, 1u8, { + let mmap: u64 = unsafe { ::core::mem::transmute(mmap) }; + mmap as u64 + }); + __bindgen_bitfield_unit.set(9usize, 1u8, { + let comm: u64 = unsafe { ::core::mem::transmute(comm) }; + comm as u64 + }); + __bindgen_bitfield_unit.set(10usize, 1u8, { + let freq: u64 = unsafe { ::core::mem::transmute(freq) }; + freq as u64 + }); + __bindgen_bitfield_unit.set(11usize, 1u8, { + let inherit_stat: u64 = unsafe { ::core::mem::transmute(inherit_stat) }; + inherit_stat as u64 + }); + __bindgen_bitfield_unit.set(12usize, 1u8, { + let enable_on_exec: u64 = unsafe { ::core::mem::transmute(enable_on_exec) }; + enable_on_exec as u64 + }); + __bindgen_bitfield_unit.set(13usize, 1u8, { + let task: u64 = unsafe { ::core::mem::transmute(task) }; + task as u64 + }); + __bindgen_bitfield_unit.set(14usize, 1u8, { + let watermark: u64 = unsafe { ::core::mem::transmute(watermark) }; + watermark as u64 + }); + __bindgen_bitfield_unit.set(15usize, 2u8, { + let precise_ip: u64 = unsafe { ::core::mem::transmute(precise_ip) }; + precise_ip as u64 + }); + __bindgen_bitfield_unit.set(17usize, 1u8, { + let mmap_data: u64 = unsafe { ::core::mem::transmute(mmap_data) }; + mmap_data as u64 + }); + __bindgen_bitfield_unit.set(18usize, 1u8, { + let sample_id_all: u64 = unsafe { ::core::mem::transmute(sample_id_all) }; + sample_id_all as u64 + }); + __bindgen_bitfield_unit.set(19usize, 1u8, { + let exclude_host: u64 = unsafe { ::core::mem::transmute(exclude_host) }; + exclude_host as u64 + }); + __bindgen_bitfield_unit.set(20usize, 1u8, { + let exclude_guest: u64 = unsafe { ::core::mem::transmute(exclude_guest) }; + exclude_guest as u64 + }); + __bindgen_bitfield_unit.set(21usize, 1u8, { + let exclude_callchain_kernel: u64 = + unsafe { ::core::mem::transmute(exclude_callchain_kernel) }; + exclude_callchain_kernel as u64 + }); + __bindgen_bitfield_unit.set(22usize, 1u8, { + let exclude_callchain_user: u64 = + unsafe { 
::core::mem::transmute(exclude_callchain_user) }; + exclude_callchain_user as u64 + }); + __bindgen_bitfield_unit.set(23usize, 1u8, { + let mmap2: u64 = unsafe { ::core::mem::transmute(mmap2) }; + mmap2 as u64 + }); + __bindgen_bitfield_unit.set(24usize, 1u8, { + let comm_exec: u64 = unsafe { ::core::mem::transmute(comm_exec) }; + comm_exec as u64 + }); + __bindgen_bitfield_unit.set(25usize, 1u8, { + let use_clockid: u64 = unsafe { ::core::mem::transmute(use_clockid) }; + use_clockid as u64 + }); + __bindgen_bitfield_unit.set(26usize, 1u8, { + let context_switch: u64 = unsafe { ::core::mem::transmute(context_switch) }; + context_switch as u64 + }); + __bindgen_bitfield_unit.set(27usize, 1u8, { + let write_backward: u64 = unsafe { ::core::mem::transmute(write_backward) }; + write_backward as u64 + }); + __bindgen_bitfield_unit.set(28usize, 1u8, { + let namespaces: u64 = unsafe { ::core::mem::transmute(namespaces) }; + namespaces as u64 + }); + __bindgen_bitfield_unit.set(29usize, 1u8, { + let ksymbol: u64 = unsafe { ::core::mem::transmute(ksymbol) }; + ksymbol as u64 + }); + __bindgen_bitfield_unit.set(30usize, 1u8, { + let bpf_event: u64 = unsafe { ::core::mem::transmute(bpf_event) }; + bpf_event as u64 + }); + __bindgen_bitfield_unit.set(31usize, 1u8, { + let aux_output: u64 = unsafe { ::core::mem::transmute(aux_output) }; + aux_output as u64 + }); + __bindgen_bitfield_unit.set(32usize, 1u8, { + let cgroup: u64 = unsafe { ::core::mem::transmute(cgroup) }; + cgroup as u64 + }); + __bindgen_bitfield_unit.set(33usize, 1u8, { + let text_poke: u64 = unsafe { ::core::mem::transmute(text_poke) }; + text_poke as u64 + }); + __bindgen_bitfield_unit.set(34usize, 1u8, { + let build_id: u64 = unsafe { ::core::mem::transmute(build_id) }; + build_id as u64 + }); + __bindgen_bitfield_unit.set(35usize, 1u8, { + let inherit_thread: u64 = unsafe { ::core::mem::transmute(inherit_thread) }; + inherit_thread as u64 + }); + __bindgen_bitfield_unit.set(36usize, 1u8, { + let remove_on_exec: u64 = unsafe { ::core::mem::transmute(remove_on_exec) }; + remove_on_exec as u64 + }); + __bindgen_bitfield_unit.set(37usize, 1u8, { + let sigtrap: u64 = unsafe { ::core::mem::transmute(sigtrap) }; + sigtrap as u64 + }); + __bindgen_bitfield_unit.set(38usize, 26u8, { + let __reserved_1: u64 = unsafe { ::core::mem::transmute(__reserved_1) }; + __reserved_1 as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct perf_event_mmap_page { + pub version: __u32, + pub compat_version: __u32, + pub lock: __u32, + pub index: __u32, + pub offset: __s64, + pub time_enabled: __u64, + pub time_running: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1, + pub pmc_width: __u16, + pub time_shift: __u16, + pub time_mult: __u32, + pub time_offset: __u64, + pub time_zero: __u64, + pub size: __u32, + pub __reserved_1: __u32, + pub time_cycles: __u64, + pub time_mask: __u64, + pub __reserved: [__u8; 928usize], + pub data_head: __u64, + pub data_tail: __u64, + pub data_offset: __u64, + pub data_size: __u64, + pub aux_head: __u64, + pub aux_tail: __u64, + pub aux_offset: __u64, + pub aux_size: __u64, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union perf_event_mmap_page__bindgen_ty_1 { + pub capabilities: __u64, + pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + pub _bitfield_align_1: [u64; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, +} +impl 
perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 { + #[inline] + pub fn cap_bit0(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(0usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_bit0_is_deprecated(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(1usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_rdpmc(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_rdpmc(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(2usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(3usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_zero(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_zero(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(4usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_user_time_short(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) } + } + #[inline] + pub fn set_cap_user_time_short(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(5usize, 1u8, val as u64) + } + } + #[inline] + pub fn cap_____res(&self) -> __u64 { + unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) } + } + #[inline] + pub fn set_cap_____res(&mut self, val: __u64) { + unsafe { + let val: u64 = ::core::mem::transmute(val); + self._bitfield_1.set(6usize, 58u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + cap_bit0: __u64, + cap_bit0_is_deprecated: __u64, + cap_user_rdpmc: __u64, + cap_user_time: __u64, + cap_user_time_zero: __u64, + cap_user_time_short: __u64, + cap_____res: __u64, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let cap_bit0: u64 = unsafe { ::core::mem::transmute(cap_bit0) }; + cap_bit0 as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let cap_bit0_is_deprecated: u64 = + unsafe { ::core::mem::transmute(cap_bit0_is_deprecated) }; + cap_bit0_is_deprecated as u64 + }); + __bindgen_bitfield_unit.set(2usize, 1u8, { + let cap_user_rdpmc: u64 = unsafe { ::core::mem::transmute(cap_user_rdpmc) }; + cap_user_rdpmc as u64 + }); + __bindgen_bitfield_unit.set(3usize, 1u8, { + let cap_user_time: u64 = unsafe { ::core::mem::transmute(cap_user_time) }; + cap_user_time as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let cap_user_time_zero: u64 = unsafe { ::core::mem::transmute(cap_user_time_zero) }; + cap_user_time_zero as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let cap_user_time_short: u64 = unsafe { ::core::mem::transmute(cap_user_time_short) }; + 
cap_user_time_short as u64 + }); + __bindgen_bitfield_unit.set(6usize, 58u8, { + let cap_____res: u64 = unsafe { ::core::mem::transmute(cap_____res) }; + cap_____res as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct perf_event_header { + pub type_: __u32, + pub misc: __u16, + pub size: __u16, +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, ToPrimitive)] +pub enum perf_event_type { + PERF_RECORD_MMAP = 1, + PERF_RECORD_LOST = 2, + PERF_RECORD_COMM = 3, + PERF_RECORD_EXIT = 4, + PERF_RECORD_THROTTLE = 5, + PERF_RECORD_UNTHROTTLE = 6, + PERF_RECORD_FORK = 7, + PERF_RECORD_READ = 8, + PERF_RECORD_SAMPLE = 9, + PERF_RECORD_MMAP2 = 10, + PERF_RECORD_AUX = 11, + PERF_RECORD_ITRACE_START = 12, + PERF_RECORD_LOST_SAMPLES = 13, + PERF_RECORD_SWITCH = 14, + PERF_RECORD_SWITCH_CPU_WIDE = 15, + PERF_RECORD_NAMESPACES = 16, + PERF_RECORD_KSYMBOL = 17, + PERF_RECORD_BPF_EVENT = 18, + PERF_RECORD_CGROUP = 19, + PERF_RECORD_TEXT_POKE = 20, + PERF_RECORD_AUX_OUTPUT_HW_ID = 21, + PERF_RECORD_MAX = 22, +} +pub const TCA_BPF_UNSPEC: _bindgen_ty_152 = 0; +pub const TCA_BPF_ACT: _bindgen_ty_152 = 1; +pub const TCA_BPF_POLICE: _bindgen_ty_152 = 2; +pub const TCA_BPF_CLASSID: _bindgen_ty_152 = 3; +pub const TCA_BPF_OPS_LEN: _bindgen_ty_152 = 4; +pub const TCA_BPF_OPS: _bindgen_ty_152 = 5; +pub const TCA_BPF_FD: _bindgen_ty_152 = 6; +pub const TCA_BPF_NAME: _bindgen_ty_152 = 7; +pub const TCA_BPF_FLAGS: _bindgen_ty_152 = 8; +pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_152 = 9; +pub const TCA_BPF_TAG: _bindgen_ty_152 = 10; +pub const TCA_BPF_ID: _bindgen_ty_152 = 11; +pub const __TCA_BPF_MAX: _bindgen_ty_152 = 12; +pub type _bindgen_ty_152 = ::core::ffi::c_uint; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ifinfomsg { + pub ifi_family: ::core::ffi::c_uchar, + pub __ifi_pad: ::core::ffi::c_uchar, + pub ifi_type: ::core::ffi::c_ushort, + pub ifi_index: ::core::ffi::c_int, + pub ifi_flags: ::core::ffi::c_uint, + pub ifi_change: ::core::ffi::c_uint, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tcmsg { + pub tcm_family: ::core::ffi::c_uchar, + pub tcm__pad1: ::core::ffi::c_uchar, + pub tcm__pad2: ::core::ffi::c_ushort, + pub tcm_ifindex: ::core::ffi::c_int, + pub tcm_handle: __u32, + pub tcm_parent: __u32, + pub tcm_info: __u32, +} +pub const TCA_UNSPEC: _bindgen_ty_170 = 0; +pub const TCA_KIND: _bindgen_ty_170 = 1; +pub const TCA_OPTIONS: _bindgen_ty_170 = 2; +pub const TCA_STATS: _bindgen_ty_170 = 3; +pub const TCA_XSTATS: _bindgen_ty_170 = 4; +pub const TCA_RATE: _bindgen_ty_170 = 5; +pub const TCA_FCNT: _bindgen_ty_170 = 6; +pub const TCA_STATS2: _bindgen_ty_170 = 7; +pub const TCA_STAB: _bindgen_ty_170 = 8; +pub const TCA_PAD: _bindgen_ty_170 = 9; +pub const TCA_DUMP_INVISIBLE: _bindgen_ty_170 = 10; +pub const TCA_CHAIN: _bindgen_ty_170 = 11; +pub const TCA_HW_OFFLOAD: _bindgen_ty_170 = 12; +pub const TCA_INGRESS_BLOCK: _bindgen_ty_170 = 13; +pub const TCA_EGRESS_BLOCK: _bindgen_ty_170 = 14; +pub const __TCA_MAX: _bindgen_ty_170 = 15; +pub type _bindgen_ty_170 = ::core::ffi::c_uint; +pub const AYA_PERF_EVENT_IOC_ENABLE: ::core::ffi::c_int = 9216; +pub const AYA_PERF_EVENT_IOC_DISABLE: ::core::ffi::c_int = 9217; +pub const AYA_PERF_EVENT_IOC_SET_BPF: ::core::ffi::c_int = 1074013192; diff --git a/kernel/src/include/bindings/mod.rs b/kernel/src/include/bindings/mod.rs index ee9997615..4ab2c21e2 100644 --- a/kernel/src/include/bindings/mod.rs +++ b/kernel/src/include/bindings/mod.rs @@ -1,2 +1,10 @@ 
-#[allow(clippy::module_inception)] +#![allow( + dead_code, + non_camel_case_types, + non_snake_case, + clippy::all, + missing_docs, + clippy::module_inception +)] pub mod bindings; +pub mod linux_bpf; diff --git a/kernel/src/init/init.rs b/kernel/src/init/init.rs index 72d4b9c38..51b45c350 100644 --- a/kernel/src/init/init.rs +++ b/kernel/src/init/init.rs @@ -80,7 +80,7 @@ fn do_start_kernel() { clocksource_boot_finish(); kprobe_init(); Futex::init(); - + crate::bpf::init_bpf_system(); #[cfg(all(target_arch = "x86_64", feature = "kvm"))] crate::virt::kvm::kvm_init(); } diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs index 99af0cc42..c3691aff1 100644 --- a/kernel/src/lib.rs +++ b/kernel/src/lib.rs @@ -20,6 +20,7 @@ #![feature(slice_ptr_get)] #![feature(sync_unsafe_cell)] #![feature(vec_into_raw_parts)] +#![feature(c_variadic)] #![cfg_attr(target_os = "none", no_std)] #![allow(internal_features)] // clippy的配置 @@ -45,6 +46,7 @@ mod arch; mod libs; #[macro_use] mod include; +mod bpf; mod debug; mod driver; // 如果driver依赖了libs,应该在libs后面导出 mod exception; @@ -54,12 +56,12 @@ mod ipc; mod misc; mod mm; mod net; +mod perf; mod process; mod sched; mod smp; mod syscall; mod time; - #[cfg(target_arch = "x86_64")] mod virt; diff --git a/kernel/src/mm/syscall.rs b/kernel/src/mm/syscall.rs index beac5cc23..cc6ebb64e 100644 --- a/kernel/src/mm/syscall.rs +++ b/kernel/src/mm/syscall.rs @@ -4,6 +4,12 @@ use alloc::sync::Arc; use log::error; use system_error::SystemError; +use super::{ + allocator::page_frame::{PageFrameCount, VirtPageFrame}, + ucontext::{AddressSpace, DEFAULT_MMAP_MIN_ADDR}, + verify_area, VirtAddr, VmFlags, +}; +use crate::process::ProcessManager; use crate::{ arch::MMArch, ipc::shm::ShmFlags, @@ -12,12 +18,6 @@ use crate::{ syscall::Syscall, }; -use super::{ - allocator::page_frame::{PageFrameCount, VirtPageFrame}, - ucontext::{AddressSpace, DEFAULT_MMAP_MIN_ADDR}, - verify_area, VirtAddr, VmFlags, -}; - bitflags! 
{ /// Memory protection flags pub struct ProtFlags: u64 { @@ -296,8 +296,8 @@ impl Syscall { len: usize, prot_flags: usize, map_flags: usize, - _fd: i32, - _offset: usize, + fd: i32, + offset: usize, ) -> Result { let map_flags = MapFlags::from_bits_truncate(map_flags as u64); let prot_flags = ProtFlags::from_bits_truncate(prot_flags as u64); @@ -312,10 +312,10 @@ impl Syscall { return Err(SystemError::EINVAL); } // 暂时不支持除匿名页以外的映射 - if !map_flags.contains(MapFlags::MAP_ANONYMOUS) { - error!("mmap: not support file mapping"); - return Err(SystemError::ENOSYS); - } + // if !map_flags.contains(MapFlags::MAP_ANONYMOUS) { + // error!("mmap: not support file mapping"); + // return Err(SystemError::ENOSYS); + // } // 暂时不支持巨页映射 if map_flags.contains(MapFlags::MAP_HUGETLB) { @@ -323,14 +323,40 @@ impl Syscall { return Err(SystemError::ENOSYS); } let current_address_space = AddressSpace::current()?; - let start_page = current_address_space.write().map_anonymous( - start_vaddr, - len, - prot_flags, - map_flags, - true, - false, - )?; + let start_page = if fd > 0 { + let page_frame = current_address_space.write().map_file( + start_vaddr, + len, + prot_flags, + map_flags, + true, + false, + )?; + let fd_table = ProcessManager::current_pcb().fd_table(); + let file = fd_table + .read() + .get_file_by_fd(fd as _) + .ok_or(SystemError::EBADF)?; + let start_addr = page_frame.virt_address().data(); + log::info!( + "mmap for file: start_addr: {:#x}, len: {:#x}, offset: {:#x}", + start_addr, + len, + offset + ); + file.inode().mmap(start_addr, len, offset)?; + page_frame + } else { + let start_page = current_address_space.write().map_anonymous( + start_vaddr, + len, + prot_flags, + map_flags, + true, + false, + )?; + start_page + }; return Ok(start_page.virt_address().data()); } diff --git a/kernel/src/mm/ucontext.rs b/kernel/src/mm/ucontext.rs index 635213ff5..72ec389df 100644 --- a/kernel/src/mm/ucontext.rs +++ b/kernel/src/mm/ucontext.rs @@ -227,6 +227,24 @@ impl InnerAddressSpace { return self.user_mapper.utable.is_current(); } + pub fn map_file( + &mut self, + start_vaddr: VirtAddr, + len: usize, + prot_flags: ProtFlags, + map_flags: MapFlags, + round_to_min: bool, + allocate_at_once: bool, + ) -> Result { + self.map_anonymous( + start_vaddr, + len, + prot_flags, + map_flags, + round_to_min, + allocate_at_once, + ) + } /// 进行匿名页映射 /// /// ## 参数 diff --git a/kernel/src/perf/bpf.rs b/kernel/src/perf/bpf.rs new file mode 100644 index 000000000..485996970 --- /dev/null +++ b/kernel/src/perf/bpf.rs @@ -0,0 +1,294 @@ +use super::{PerfEventOps, Result}; +use crate::arch::MMArch; +use crate::include::bindings::linux_bpf::{ + perf_event_header, perf_event_mmap_page, perf_event_type, +}; +use crate::libs::spinlock::SpinLock; +use crate::mm::MemoryManagementArch; +use crate::perf::util::PerfProbeArgs; +use core::fmt::Debug; + +const PAGE_SIZE: usize = MMArch::PAGE_SIZE; +#[derive(Debug)] +pub struct BpfPerfEvent { + args: PerfProbeArgs, + data: SpinLock, +} + +#[derive(Debug)] +pub struct BpfPerfEventData { + enabled: bool, + mmap_page: RingPage, + offset: usize, +} + +/// The event type in our particular use case will be `PERF_RECORD_SAMPLE` or `PERF_RECORD_LOST`. +/// `PERF_RECORD_SAMPLE` indicating that there is an actual sample after this header. +/// And `PERF_RECORD_LOST` indicating that there is a record lost header following the perf event header. 
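+// Record layout sketch (informal): every record below starts with a `perf_event_header`
+// whose `size` field covers the whole record, so a consumer can walk the ring buffer by
+// advancing its tail by `header.size` for each record it reads:
+//
+//   PERF_RECORD_SAMPLE: [perf_event_header][u32 size][`size` bytes of raw payload]
+//   PERF_RECORD_LOST:   [perf_event_header][u64 id][u64 count]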
+#[repr(C)]
+#[derive(Debug)]
+struct LostSamples {
+    header: perf_event_header,
+    id: u64,
+    count: u64,
+}
+
+impl LostSamples {
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
+    }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+struct Sample {
+    header: perf_event_header,
+    size: u32,
+}
+
+impl Sample {
+    fn as_bytes(&self) -> &[u8] {
+        unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::<Self>()) }
+    }
+}
+
+#[repr(C)]
+#[derive(Debug)]
+struct PerfSample<'a> {
+    s_hdr: Sample,
+    value: &'a [u8],
+}
+
+impl<'a> PerfSample<'a> {
+    fn calculate_size(value_size: usize) -> usize {
+        size_of::<Sample>() + value_size
+    }
+}
+
+#[derive(Debug)]
+pub struct RingPage {
+    size: usize,
+    ptr: usize,
+    data_region_size: usize,
+    lost: usize,
+}
+
+impl RingPage {
+    pub fn empty() -> Self {
+        RingPage {
+            ptr: 0,
+            size: 0,
+            data_region_size: 0,
+            lost: 0,
+        }
+    }
+
+    pub fn new_init(start: usize, len: usize) -> Self {
+        Self::init(start as _, len)
+    }
+
+    fn init(ptr: *mut u8, size: usize) -> Self {
+        assert_eq!(size % PAGE_SIZE, 0);
+        assert!(size / PAGE_SIZE >= 2);
+        // The first page will be filled with perf_event_mmap_page
+        unsafe {
+            let perf_event_mmap_page = &mut *(ptr as *mut perf_event_mmap_page);
+            perf_event_mmap_page.data_offset = PAGE_SIZE as u64;
+            perf_event_mmap_page.data_size = (size - PAGE_SIZE) as u64;
+            // user will read sample or lost record from data_tail
+            perf_event_mmap_page.data_tail = 0;
+            // kernel will write sample or lost record from data_head
+            perf_event_mmap_page.data_head = 0;
+            // It is a ring buffer.
+        }
+        RingPage {
+            ptr: ptr as usize,
+            size,
+            data_region_size: size - PAGE_SIZE,
+            lost: 0,
+        }
+    }
+
+    fn can_write(&self, data_size: usize, data_tail: usize, data_head: usize) -> bool {
+        if (data_head + 1) % self.data_region_size == data_tail {
+            // The buffer is full
+            return false;
+        }
+        let capacity = if data_head >= data_tail {
+            self.data_region_size - data_head + data_tail
+        } else {
+            data_tail - data_head
+        };
+        data_size <= capacity
+    }
+
+    pub fn write_event(&mut self, data: &[u8]) -> Result<()> {
+        let data_tail = unsafe { &mut (*(self.ptr as *mut perf_event_mmap_page)).data_tail };
+        let data_head = unsafe { &mut (*(self.ptr as *mut perf_event_mmap_page)).data_head };
+        // data_tail..data_head is the region that can be written
+        // check if there is enough space to write the event
+        let sample_size = PerfSample::calculate_size(data.len());
+
+        let can_write_sample =
+            self.can_write(sample_size, *data_tail as usize, *data_head as usize);
+        // log::error!(
+        //     "can_write_sample: {}, data_tail: {}, data_head: {}, data.len(): {}, region_size: {}",
+        //     can_write_sample,
+        //     *data_tail,
+        //     *data_head,
+        //     data.len(),
+        //     self.data_region_size
+        // );
+        if !can_write_sample {
+            //we need record it to the lost record
+            self.lost += 1;
+            // log::error!(
+            //     "Lost record: {}, data_tail: {}, data_head: {}",
+            //     self.lost,
+            //     *data_tail,
+            //     *data_head
+            // );
+            Ok(())
+        } else {
+            // we can write the sample to the page
+            // If the lost record is not zero, we need to write the lost record first.
+            let can_write_lost_record = self.can_write(
+                size_of::<LostSamples>(),
+                *data_tail as usize,
+                *data_head as usize,
+            );
+            if self.lost > 0 && can_write_lost_record {
+                let new_data_head = self.write_lost(*data_head as usize)?;
+                *data_head = new_data_head as u64;
+                // log::info!(
+                //     "Write lost record: {}, data_tail: {}, new_data_head: {}",
+                //     self.lost,
+                //     *data_tail,
+                //     *data_head
+                // );
+                self.lost = 0;
+                self.write_event(data)
+            } else {
+                let new_data_head = self.write_sample(data, *data_head as usize)?;
+                *data_head = new_data_head as u64;
+                // log::info!(
+                //     "Write sample record, data_tail: {}, new_data_head: {}",
+                //     *data_tail,
+                //     *data_head
+                // );
+                Ok(())
+            }
+        }
+    }
+
+    /// Write any data to the page.
+    ///
+    /// Return the new data_head
+    fn write_any(&mut self, data: &[u8], data_head: usize) -> Result<usize> {
+        let data_region_len = self.data_region_size;
+        let data_region = self.as_mut_slice()[PAGE_SIZE..].as_mut();
+        let data_len = data.len();
+        let end = (data_head + data_len) % data_region_len;
+        let start = data_head;
+        if start < end {
+            data_region[start..end].copy_from_slice(data);
+        } else {
+            let first_len = data_region_len - start;
+            data_region[start..start + first_len].copy_from_slice(&data[..first_len]);
+            data_region[0..end].copy_from_slice(&data[first_len..]);
+        }
+        Ok(end)
+    }
+
+    /// Write a sample to the page.
+    fn write_sample(&mut self, data: &[u8], data_head: usize) -> Result<usize> {
+        let perf_sample = PerfSample {
+            s_hdr: Sample {
+                header: perf_event_header {
+                    type_: perf_event_type::PERF_RECORD_SAMPLE as u32,
+                    misc: 0,
+                    size: size_of::<Sample>() as u16 + data.len() as u16,
+                },
+                size: data.len() as u32,
+            },
+            value: data,
+        };
+        let new_head = self.write_any(perf_sample.s_hdr.as_bytes(), data_head)?;
+        self.write_any(perf_sample.value, new_head)
+    }
+
+    /// Write a lost record to the page.
+    ///
+    /// Return the new data_head
+    fn write_lost(&mut self, data_head: usize) -> Result<usize> {
+        let lost = LostSamples {
+            header: perf_event_header {
+                type_: perf_event_type::PERF_RECORD_LOST as u32,
+                misc: 0,
+                size: size_of::<LostSamples>() as u16,
+            },
+            id: 0,
+            count: self.lost as u64,
+        };
+        self.write_any(lost.as_bytes(), data_head)
+    }
+
+    pub fn readable(&self) -> bool {
+        let data_tail = unsafe { &(*(self.ptr as *mut perf_event_mmap_page)).data_tail };
+        let data_head = unsafe { &(*(self.ptr as *mut perf_event_mmap_page)).data_head };
+        data_tail != data_head
+    }
+    pub fn as_slice(&self) -> &[u8] {
+        unsafe { core::slice::from_raw_parts(self.ptr as *const u8, self.size) }
+    }
+    pub fn as_mut_slice(&mut self) -> &mut [u8] {
+        unsafe { core::slice::from_raw_parts_mut(self.ptr as *mut u8, self.size) }
+    }
+}
+
+impl BpfPerfEvent {
+    pub fn new(args: PerfProbeArgs) -> Self {
+        BpfPerfEvent {
+            args,
+            data: SpinLock::new(BpfPerfEventData {
+                enabled: false,
+                mmap_page: RingPage::empty(),
+                offset: 0,
+            }),
+        }
+    }
+    pub fn do_mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> {
+        let mut data = self.data.lock();
+        let mmap_page = RingPage::new_init(start, len);
+        data.mmap_page = mmap_page;
+        data.offset = offset;
+        Ok(())
+    }
+
+    pub fn write_event(&self, data: &[u8]) -> Result<()> {
+        let mut inner_data = self.data.lock();
+        inner_data.mmap_page.write_event(data)
+    }
+}
+
+impl PerfEventOps for BpfPerfEvent {
+    fn mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> {
+        self.do_mmap(start, len, offset)
+    }
+    fn enable(&self) -> Result<()> {
+        self.data.lock().enabled = true;
+        Ok(())
+    }
+    fn disable(&self) -> Result<()> {
+        self.data.lock().enabled = false;
+        Ok(())
+    }
+    fn readable(&self) -> bool {
+        // false
+        self.data.lock().mmap_page.readable()
+    }
+}
+
+pub fn perf_event_open_bpf(args: PerfProbeArgs) -> BpfPerfEvent {
+    BpfPerfEvent::new(args)
+}
diff --git a/kernel/src/perf/kprobe.rs b/kernel/src/perf/kprobe.rs
new file mode 100644
index 000000000..186ea7b80
--- /dev/null
+++ b/kernel/src/perf/kprobe.rs
@@ -0,0 +1,110 @@
+use super::Result;
+use crate::arch::interrupt::TrapFrame;
+use crate::arch::kprobe::KProbeContext;
+use crate::bpf::helper::BPF_HELPER_FUN_SET;
+use crate::bpf::prog::BpfProg;
+use crate::debug::kprobe::args::KprobeInfo;
+use crate::debug::kprobe::{register_kprobe, unregister_kprobe, LockKprobe};
+use crate::filesystem::vfs::file::File;
+use crate::libs::casting::DowncastArc;
+use crate::perf::util::PerfProbeArgs;
+use crate::perf::PerfEventOps;
+use alloc::boxed::Box;
+use alloc::sync::Arc;
+use core::fmt::Debug;
+use kprobe::{CallBackFunc, ProbeArgs};
+use rbpf::EbpfVmRawOwned;
+use system_error::SystemError;
+
+#[derive(Debug)]
+pub struct KprobePerfEvent {
+    args: PerfProbeArgs,
+    kprobe: LockKprobe,
+}
+
+impl Drop for KprobePerfEvent {
+    fn drop(&mut self) {
+        unregister_kprobe(self.kprobe.clone()).unwrap();
+    }
+}
+
+impl KprobePerfEvent {
+    pub fn do_set_bpf_prog(&self, prog_file: Arc<File>) -> Result<()> {
+        let file = prog_file
+            .inode()
+            .downcast_arc::<BpfProg>()
+            .ok_or(SystemError::EINVAL)?;
+        let prog_slice = file.insns();
+        let mut vm = EbpfVmRawOwned::new(Some(prog_slice.to_vec())).unwrap();
+        vm.register_helper_set(BPF_HELPER_FUN_SET.get()).unwrap();
+
+        // create a callback to execute the ebpf prog
+        let callback = Box::new(KprobePerfCallBack::new(file, vm));
+        // update callback for kprobe
+        self.kprobe.write().update_event_callback(callback);
+        Ok(())
+    }
+}
+
+pub struct KprobePerfCallBack {
+    bpf_prog_file: Arc<File>,
+    vm: EbpfVmRawOwned,
+}
+
+impl KprobePerfCallBack {
+    fn new(bpf_prog_file: Arc<File>, vm: EbpfVmRawOwned) -> Self {
+        Self { bpf_prog_file, vm }
+    }
+}
+
+impl CallBackFunc for KprobePerfCallBack {
+    fn call(&self, trap_frame: &dyn ProbeArgs) {
+        let trap_frame = trap_frame.as_any().downcast_ref::<TrapFrame>().unwrap();
+        let pt_regs = KProbeContext::from(trap_frame);
+        let probe_context = unsafe {
+            core::slice::from_raw_parts_mut(
+                &pt_regs as *const KProbeContext as *mut u8,
+                size_of::<KProbeContext>(),
+            )
+        };
+        // log::info!("---------------------Running probe---------------------");
+        let _res = self.vm.execute_program(probe_context).unwrap();
+        // log::info!("Program returned: {res:?} ({res:#x})");
+        // log::info!("---------------------Probe finished---------------------");
+    }
+}
+
+impl PerfEventOps for KprobePerfEvent {
+    fn set_bpf_prog(&self, bpf_prog: Arc<File>) -> Result<()> {
+        self.do_set_bpf_prog(bpf_prog)
+    }
+    fn enable(&self) -> Result<()> {
+        self.kprobe.write().enable();
+        Ok(())
+    }
+    fn disable(&self) -> Result<()> {
+        self.kprobe.write().disable();
+        Ok(())
+    }
+}
+
+pub fn perf_event_open_kprobe(args: PerfProbeArgs) -> KprobePerfEvent {
+    let symbol = args.name.clone();
+    log::info!("create kprobe for symbol: {symbol}");
+    let kprobe_info = KprobeInfo {
+        pre_handler: |_| {
+            // log::info!("pre_handler:kprobe for perf_event_open_kprobe")
+        },
+        post_handler: |_| {
+            // log::info!("post_handler:kprobe for perf_event_open_kprobe")
+        },
+        fault_handler: None,
+        event_callback: None,
+        symbol: Some(symbol),
+        addr: None,
+        offset: 0,
+        enable: false,
+    };
+    let kprobe = register_kprobe(kprobe_info).expect("create kprobe failed");
+    KprobePerfEvent { args, kprobe }
+}
diff --git a/kernel/src/perf/mod.rs b/kernel/src/perf/mod.rs
new file mode 100644
index 000000000..aafbfe89f
--- /dev/null
+++ b/kernel/src/perf/mod.rs
@@ -0,0 +1,290 @@
+#![allow(unused)]
+mod bpf;
+mod kprobe;
+mod util;
+
+use crate::filesystem::vfs::file::{File, FileMode};
+use crate::filesystem::vfs::syscall::ModeType;
+use crate::filesystem::vfs::{FilePrivateData, FileSystem, FileType, IndexNode, Metadata};
+use crate::include::bindings::linux_bpf::{
+    perf_event_attr, perf_event_sample_format, perf_sw_ids, perf_type_id,
+};
+use crate::libs::casting::DowncastArc;
+use crate::libs::spinlock::{SpinLock, SpinLockGuard};
+use crate::net::event_poll::{EPollEventType, EPollItem, EventPoll, KernelIoctlData};
+use crate::perf::bpf::BpfPerfEvent;
+use crate::perf::util::{PerfEventIoc, PerfEventOpenFlags, PerfProbeArgs};
+use crate::process::ProcessManager;
+use crate::syscall::user_access::UserBufferReader;
+use crate::syscall::Syscall;
+use alloc::boxed::Box;
+use alloc::collections::LinkedList;
+use alloc::string::String;
+use alloc::sync::{Arc, Weak};
+use alloc::vec::Vec;
+use core::any::Any;
+use core::ffi::c_void;
+use core::fmt::Debug;
+use core::ops::Deref;
+use intertrait::{CastFrom, CastFromSync};
+use log::info;
+use num_traits::FromPrimitive;
+use system_error::SystemError;
+
+type Result<T> = core::result::Result<T, SystemError>;
+
+pub trait PerfEventOps: Send + Sync + Debug + CastFromSync + CastFrom {
+    fn mmap(&self, _start: usize, _len: usize, _offset: usize) -> Result<()> {
+        panic!("mmap not implemented for PerfEvent");
+    }
+    fn set_bpf_prog(&self, _bpf_prog: Arc<File>) -> Result<()> {
+        panic!("set_bpf_prog not implemented for PerfEvent");
+    }
+    fn enable(&self) -> Result<()> {
+        panic!("enable not implemented");
+    }
+    fn disable(&self) -> Result<()> {
+        panic!("disable not implemented");
+    }
+    fn readable(&self) -> bool {
+        panic!("readable not
implemented"); + } +} + +#[derive(Debug)] +pub struct PerfEventInode { + event: Box, + epitems: SpinLock>>, +} + +impl PerfEventInode { + pub fn new(event: Box) -> Self { + Self { + event, + epitems: SpinLock::new(LinkedList::new()), + } + } + pub fn remove_epoll( + &self, + epoll: &Weak>, + ) -> core::result::Result<(), SystemError> { + let is_remove = !self + .epitems + .lock_irqsave() + .extract_if(|x| x.epoll().ptr_eq(epoll)) + .collect::>() + .is_empty(); + if is_remove { + return Ok(()); + } + Err(SystemError::ENOENT) + } + fn do_poll(&self) -> Result { + let mut events = EPollEventType::empty(); + if self.event.readable() { + events |= EPollEventType::EPOLLIN | EPollEventType::EPOLLRDNORM; + } + return Ok(events.bits() as usize); + } + fn epoll_callback(&self) -> Result<()> { + let pollflag = EPollEventType::from_bits_truncate(self.do_poll()? as u32); + // 唤醒epoll中等待的进程 + EventPoll::wakeup_epoll(&self.epitems, pollflag) + } +} + +impl Deref for PerfEventInode { + type Target = Box; + + fn deref(&self) -> &Self::Target { + &self.event + } +} + +impl IndexNode for PerfEventInode { + fn mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> { + self.event.mmap(start, len, offset) + } + fn open(&self, _data: SpinLockGuard, _mode: &FileMode) -> Result<()> { + Ok(()) + } + fn close(&self, _data: SpinLockGuard) -> Result<()> { + Ok(()) + } + fn read_at( + &self, + _offset: usize, + _len: usize, + _buf: &mut [u8], + _data: SpinLockGuard, + ) -> Result { + panic!("read_at not implemented for PerfEvent"); + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + _buf: &[u8], + _data: SpinLockGuard, + ) -> Result { + panic!("write_at not implemented for PerfEvent"); + } + + fn poll(&self, _private_data: &FilePrivateData) -> Result { + self.do_poll() + } + + fn metadata(&self) -> Result { + let meta = Metadata { + mode: ModeType::from_bits_truncate(0o755), + file_type: FileType::File, + ..Default::default() + }; + Ok(meta) + } + + fn resize(&self, _len: usize) -> Result<()> { + Ok(()) + } + + fn ioctl(&self, cmd: u32, data: usize, _private_data: &FilePrivateData) -> Result { + let req = PerfEventIoc::from_u32(cmd).ok_or(SystemError::EINVAL)?; + info!("perf_event_ioctl: request: {:?}, arg: {}", req, data); + match req { + PerfEventIoc::Enable => { + self.event.enable()?; + Ok(0) + } + PerfEventIoc::Disable => { + self.event.disable()?; + Ok(0) + } + PerfEventIoc::SetBpf => { + info!("perf_event_ioctl: PERF_EVENT_IOC_SET_BPF, arg: {}", data); + let bpf_prog_fd = data; + let fd_table = ProcessManager::current_pcb().fd_table(); + let file = fd_table + .read() + .get_file_by_fd(bpf_prog_fd as _) + .ok_or(SystemError::EBADF)?; + self.event.set_bpf_prog(file)?; + Ok(0) + } + } + } + + fn kernel_ioctl( + &self, + arg: Arc, + _data: &FilePrivateData, + ) -> core::result::Result { + let epitem = arg + .arc_any() + .downcast::() + .map_err(|_| SystemError::EFAULT)?; + self.epitems.lock().push_back(epitem); + Ok(0) + } + + fn fs(&self) -> Arc { + panic!("PerfEvent does not have a filesystem") + } + fn as_any_ref(&self) -> &dyn Any { + self + } + fn list(&self) -> Result> { + Err(SystemError::ENOSYS) + } +} + +impl Syscall { + pub fn sys_perf_event_open( + attr: *const u8, + pid: i32, + cpu: i32, + group_fd: i32, + flags: u32, + ) -> Result { + let buf = UserBufferReader::new( + attr as *const perf_event_attr, + size_of::(), + true, + )?; + let attr = buf.read_one_from_user(0)?; + perf_event_open(attr, pid, cpu, group_fd, flags) + } +} + +pub fn perf_event_open( + attr: &perf_event_attr, 
+ pid: i32, + cpu: i32, + group_fd: i32, + flags: u32, +) -> Result { + let args = PerfProbeArgs::try_from(attr, pid, cpu, group_fd, flags)?; + log::info!("perf_event_process: {:#?}", args); + let file_mode = if args + .flags + .contains(PerfEventOpenFlags::PERF_FLAG_FD_CLOEXEC) + { + FileMode::O_RDWR | FileMode::O_CLOEXEC + } else { + FileMode::O_RDWR + }; + + let event: Box = match args.type_ { + // Kprobe + // See /sys/bus/event_source/devices/kprobe/type + perf_type_id::PERF_TYPE_MAX => { + let kprobe_event = kprobe::perf_event_open_kprobe(args); + Box::new(kprobe_event) + } + perf_type_id::PERF_TYPE_SOFTWARE => { + // For bpf prog output + assert_eq!(args.config, perf_sw_ids::PERF_COUNT_SW_BPF_OUTPUT); + assert_eq!( + args.sample_type, + Some(perf_event_sample_format::PERF_SAMPLE_RAW) + ); + let bpf_event = bpf::perf_event_open_bpf(args); + Box::new(bpf_event) + } + _ => { + unimplemented!("perf_event_process: unknown type: {:?}", args); + } + }; + let perf_event = PerfEventInode::new(event); + let file = File::new(Arc::new(perf_event), file_mode)?; + let fd_table = ProcessManager::current_pcb().fd_table(); + let fd = fd_table.write().alloc_fd(file, None).map(|x| x as usize)?; + Ok(fd) +} + +pub fn perf_event_output(_ctx: *mut c_void, fd: usize, _flags: u32, data: &[u8]) -> Result<()> { + let file = get_perf_event_file(fd)?; + // info!("perf_event_output: fd: {}, flags: {:x?}", fd, flags); + let bpf_event_file = file.deref().deref(); + let bpf_event_file = bpf_event_file + .deref() + .ref_any() + .downcast_ref::() + .unwrap(); + bpf_event_file.write_event(data)?; + file.epoll_callback()?; + Ok(()) +} + +fn get_perf_event_file(fd: usize) -> Result> { + let fd_table = ProcessManager::current_pcb().fd_table(); + let file = fd_table + .read() + .get_file_by_fd(fd as _) + .ok_or(SystemError::EBADF)?; + let event = file + .inode() + .downcast_arc::() + .ok_or(SystemError::EINVAL)?; + Ok(event) +} diff --git a/kernel/src/perf/util.rs b/kernel/src/perf/util.rs new file mode 100644 index 000000000..d7c4541fa --- /dev/null +++ b/kernel/src/perf/util.rs @@ -0,0 +1,71 @@ +use crate::include::bindings::linux_bpf::{ + perf_event_attr, perf_event_sample_format, perf_sw_ids, perf_type_id, +}; +use crate::syscall::user_access::check_and_clone_cstr; +use alloc::string::String; +use num_traits::FromPrimitive; +use system_error::SystemError; + +bitflags! { + pub struct PerfEventOpenFlags: u32 { + const PERF_FLAG_FD_NO_GROUP = 1; + const PERF_FLAG_FD_OUTPUT = 2; + const PERF_FLAG_PID_CGROUP = 4; + const PERF_FLAG_FD_CLOEXEC = 8; + } +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, FromPrimitive)] +pub enum PerfEventIoc { + Enable = 9216, + Disable = 9217, + SetBpf = 1074013192, +} + +#[derive(Debug, Clone)] +pub struct PerfProbeArgs { + pub config: perf_sw_ids, + pub name: String, + pub offset: u64, + pub size: u32, + pub type_: perf_type_id, + pub pid: i32, + pub cpu: i32, + pub group_fd: i32, + pub flags: PerfEventOpenFlags, + pub sample_type: Option, +} + +impl PerfProbeArgs { + pub fn try_from( + attr: &perf_event_attr, + pid: i32, + cpu: i32, + group_fd: i32, + flags: u32, + ) -> Result { + let ty = perf_type_id::from_u32(attr.type_).ok_or(SystemError::EINVAL)?; + let config = perf_sw_ids::from_u32(attr.config as u32).ok_or(SystemError::EINVAL)?; + let name = if ty == perf_type_id::PERF_TYPE_MAX { + let name_ptr = unsafe { attr.__bindgen_anon_3.config1 } as *const u8; + let name = check_and_clone_cstr(name_ptr, None)?; + name.into_string().map_err(|_| SystemError::EINVAL)? 
+ } else { + String::new() + }; + let sample_ty = perf_event_sample_format::from_u32(attr.sample_type as u32); + let args = PerfProbeArgs { + config, + name, + offset: unsafe { attr.__bindgen_anon_4.config2 }, + size: attr.size, + type_: ty, + pid, + cpu, + group_fd, + flags: PerfEventOpenFlags::from_bits_truncate(flags), + sample_type: sample_ty, + }; + Ok(args) + } +} diff --git a/kernel/src/syscall/mod.rs b/kernel/src/syscall/mod.rs index c9600fdf0..c09a4f6be 100644 --- a/kernel/src/syscall/mod.rs +++ b/kernel/src/syscall/mod.rs @@ -1132,6 +1132,20 @@ impl Syscall { let flags = args[1] as u32; Self::sys_eventfd(initval, flags) } + SYS_BPF => { + let cmd = args[0] as u32; + let attr = args[1] as *mut u8; + let size = args[2] as u32; + Self::sys_bpf(cmd, attr, size) + } + SYS_PERF_EVENT_OPEN => { + let attr = args[0] as *const u8; + let pid = args[1] as i32; + let cpu = args[2] as i32; + let group_fd = args[3] as i32; + let flags = args[4] as u32; + Self::sys_perf_event_open(attr, pid, cpu, group_fd, flags) + } _ => panic!("Unsupported syscall ID: {}", syscall_num), }; diff --git a/kernel/src/time/syscall.rs b/kernel/src/time/syscall.rs index 76f9349eb..ec8c0a155 100644 --- a/kernel/src/time/syscall.rs +++ b/kernel/src/time/syscall.rs @@ -2,8 +2,6 @@ use core::{ ffi::{c_int, c_longlong}, time::Duration, }; - -use log::warn; use num_traits::FromPrimitive; use system_error::SystemError; @@ -139,7 +137,7 @@ impl Syscall { pub fn clock_gettime(clock_id: c_int, tp: *mut PosixTimeSpec) -> Result { let clock_id = PosixClockID::try_from(clock_id)?; if clock_id != PosixClockID::Realtime { - warn!("clock_gettime: currently only support Realtime clock, but got {:?}. Defaultly return realtime!!!\n", clock_id); + // warn!("clock_gettime: currently only support Realtime clock, but got {:?}. Defaultly return realtime!!!\n", clock_id); } if tp.is_null() { return Err(SystemError::EFAULT); diff --git a/user/apps/syscall_ebpf/.cargo/config.toml b/user/apps/syscall_ebpf/.cargo/config.toml new file mode 100644 index 000000000..35049cbcb --- /dev/null +++ b/user/apps/syscall_ebpf/.cargo/config.toml @@ -0,0 +1,2 @@ +[alias] +xtask = "run --package xtask --" diff --git a/user/apps/syscall_ebpf/.dir-locals.el b/user/apps/syscall_ebpf/.dir-locals.el new file mode 100644 index 000000000..07f484b10 --- /dev/null +++ b/user/apps/syscall_ebpf/.dir-locals.el @@ -0,0 +1 @@ +((prog-mode . ((lsp-rust-analyzer-linked-projects . 
["Cargo.toml" "syscall_ebpf-ebpf/Cargo.toml"])))) diff --git a/user/apps/syscall_ebpf/.gitignore b/user/apps/syscall_ebpf/.gitignore new file mode 100644 index 000000000..9db7029fd --- /dev/null +++ b/user/apps/syscall_ebpf/.gitignore @@ -0,0 +1,9 @@ +### https://raw.github.com/github/gitignore/master/Rust.gitignore + +# Generated by Cargo +# will have compiled files and executables +debug/ +target/ + +# These are backup files generated by rustfmt +**/*.rs.bk diff --git a/user/apps/syscall_ebpf/.vim/coc-settings.json b/user/apps/syscall_ebpf/.vim/coc-settings.json new file mode 100644 index 000000000..0c82ac973 --- /dev/null +++ b/user/apps/syscall_ebpf/.vim/coc-settings.json @@ -0,0 +1,3 @@ +{ + "rust-analyzer.linkedProjects": ["Cargo.toml", "syscall_ebpf-ebpf/Cargo.toml"] +} diff --git a/user/apps/syscall_ebpf/.vscode/settings.json b/user/apps/syscall_ebpf/.vscode/settings.json new file mode 100644 index 000000000..0c82ac973 --- /dev/null +++ b/user/apps/syscall_ebpf/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "rust-analyzer.linkedProjects": ["Cargo.toml", "syscall_ebpf-ebpf/Cargo.toml"] +} diff --git a/user/apps/syscall_ebpf/Cargo.toml b/user/apps/syscall_ebpf/Cargo.toml new file mode 100644 index 000000000..af91116bd --- /dev/null +++ b/user/apps/syscall_ebpf/Cargo.toml @@ -0,0 +1,3 @@ +[workspace] +resolver = "2" +members = ["xtask", "syscall_ebpf", "syscall_ebpf-common"] diff --git a/user/apps/syscall_ebpf/README.md b/user/apps/syscall_ebpf/README.md new file mode 100644 index 000000000..fe5ed32d3 --- /dev/null +++ b/user/apps/syscall_ebpf/README.md @@ -0,0 +1,32 @@ +# syscall_ebpf + +## Prerequisites + +1. Install bpf-linker: `cargo install bpf-linker` + +## Build eBPF + +```bash +cargo xtask build-ebpf +``` + +To perform a release build you can use the `--release` flag. +You may also change the target architecture with the `--target` flag. 
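+
+For example, a release build for the default BPF target could look like this
+(illustrative invocation; the target triple matches `syscall_ebpf-ebpf/.cargo/config.toml`):
+
+```bash
+cargo xtask build-ebpf --release --target bpfel-unknown-none
+```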
+ +## Build Userspace + +```bash +cargo build +``` + +## Build eBPF and Userspace + +```bash +cargo xtask build +``` + +## Run + +```bash +RUST_LOG=info cargo xtask run +``` diff --git a/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml new file mode 100644 index 000000000..e1981510c --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "syscall_ebpf-common" +version = "0.1.0" +edition = "2021" + +[features] +default = [] +user = ["aya"] + +[dependencies] +aya = { git = "https://github.com/os-module/tiny-aya.git", optional = true } + +[lib] +path = "src/lib.rs" diff --git a/user/apps/syscall_ebpf/syscall_ebpf-common/src/lib.rs b/user/apps/syscall_ebpf/syscall_ebpf-common/src/lib.rs new file mode 100644 index 000000000..0c9ac1ac8 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-common/src/lib.rs @@ -0,0 +1 @@ +#![no_std] diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.cargo/config.toml b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.cargo/config.toml new file mode 100644 index 000000000..4302a7f16 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.cargo/config.toml @@ -0,0 +1,6 @@ +[build] +target-dir = "../target" +target = "bpfel-unknown-none" + +[unstable] +build-std = ["core"] diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.helix/config.toml b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.helix/config.toml new file mode 100644 index 000000000..da5424f19 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.helix/config.toml @@ -0,0 +1,2 @@ +[editor] +workspace-lsp-roots = [] diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vim/coc-settings.json b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vim/coc-settings.json new file mode 100644 index 000000000..e2211a64f --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vim/coc-settings.json @@ -0,0 +1,4 @@ +{ + "rust-analyzer.cargo.target": "bpfel-unknown-none", + "rust-analyzer.checkOnSave.allTargets": false +} diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vscode/settings.json b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vscode/settings.json new file mode 100644 index 000000000..e2211a64f --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/.vscode/settings.json @@ -0,0 +1,4 @@ +{ + "rust-analyzer.cargo.target": "bpfel-unknown-none", + "rust-analyzer.checkOnSave.allTargets": false +} diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/Cargo.toml new file mode 100644 index 000000000..5237adc19 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "syscall_ebpf-ebpf" +version = "0.1.0" +edition = "2021" + +[dependencies] +aya-ebpf = { git = "https://github.com/aya-rs/aya", rev = "3d57d35" } +aya-log-ebpf = { git = "https://github.com/aya-rs/aya", rev = "3d57d35" } + +syscall_ebpf-common = { path = "../syscall_ebpf-common" } + +[[bin]] +name = "syscall_ebpf" +path = "src/main.rs" + +[profile.dev] +opt-level = 3 +debug = false +debug-assertions = false +overflow-checks = false +lto = true +panic = "abort" +incremental = false +codegen-units = 1 +rpath = false + +[profile.release] +lto = true +panic = "abort" +codegen-units = 1 + +[workspace] +members = [] diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml new file mode 100644 index 000000000..24ce39183 --- /dev/null +++ 
b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml @@ -0,0 +1,13 @@ +[toolchain] +channel = "nightly" +# The source code of rustc, provided by the rust-src component, is needed for +# building eBPF programs. +components = [ + "cargo", + "clippy", + "rust-docs", + "rust-src", + "rust-std", + "rustc", + "rustfmt", +] diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/src/main.rs b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/src/main.rs new file mode 100644 index 000000000..7f9b79b65 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/src/main.rs @@ -0,0 +1,44 @@ +#![no_std] +#![no_main] + +use aya_ebpf::{macros::kprobe, programs::ProbeContext}; +use aya_ebpf::macros::map; +use aya_ebpf::maps::HashMap; +use aya_log_ebpf::info; + +#[kprobe] +pub fn syscall_ebpf(ctx: ProbeContext) -> u32 { + try_syscall_ebpf(ctx).unwrap_or_else(|ret| ret) +} + +fn try_syscall_ebpf(ctx: ProbeContext) -> Result { + let pt_regs = unsafe { + &*ctx.regs + }; + // first arg -> rdi + // second arg -> rsi + // third arg -> rdx + // four arg -> rcx + let syscall_num = pt_regs.rsi as usize; + if syscall_num != 1 { + unsafe { + if let Some(v) = SYSCALL_LIST.get(&(syscall_num as u32)){ + let new_v = *v + 1; + SYSCALL_LIST.insert(&(syscall_num as u32), &new_v,0).unwrap(); + }else { + SYSCALL_LIST.insert(&(syscall_num as u32), &1,0).unwrap(); + } + } + info!(&ctx, "invoke syscall {}", syscall_num); + } + Ok(0) +} + +#[map] // +static SYSCALL_LIST: HashMap = + HashMap::::with_max_entries(1024, 0); + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! { + unsafe { core::hint::unreachable_unchecked() } +} diff --git a/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml new file mode 100644 index 000000000..df20ba02c --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "syscall_ebpf" +version = "0.1.0" +edition = "2021" +publish = false + +[dependencies] +aya = "0.12" +aya-log = "0.2" +syscall_ebpf-common = { path = "../syscall_ebpf-common", features = ["user"] } +anyhow = "1" +env_logger = "0.10" +libc = "0.2" +log = "0.4" +tokio = { version = "1.25", features = ["macros", "rt", "rt-multi-thread", "net", "signal"] } + +[[bin]] +name = "syscall_ebpf" +path = "src/main.rs" diff --git a/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs b/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs new file mode 100644 index 000000000..2a3a274d5 --- /dev/null +++ b/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs @@ -0,0 +1,47 @@ +use aya::programs::KProbe; +use aya::{include_bytes_aligned, Bpf}; +use aya_log::BpfLogger; +use log::{info, warn, debug}; +use tokio::signal; + +#[tokio::main] +async fn main() -> Result<(), anyhow::Error> { + env_logger::init(); + + // Bump the memlock rlimit. This is needed for older kernels that don't use the + // new memcg based accounting, see https://lwn.net/Articles/837122/ + let rlim = libc::rlimit { + rlim_cur: libc::RLIM_INFINITY, + rlim_max: libc::RLIM_INFINITY, + }; + let ret = unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlim) }; + if ret != 0 { + debug!("remove limit on locked memory failed, ret is: {}", ret); + } + + // This will include your eBPF object file as raw bytes at compile-time and load it at + // runtime. This approach is recommended for most real-world use cases. If you would + // like to specify the eBPF program at runtime rather than at compile-time, you can + // reach for `Bpf::load_file` instead. 
+ #[cfg(debug_assertions)] + let mut bpf = Bpf::load(include_bytes_aligned!( + "../../target/bpfel-unknown-none/debug/syscall_ebpf" + ))?; + #[cfg(not(debug_assertions))] + let mut bpf = Bpf::load(include_bytes_aligned!( + "../../target/bpfel-unknown-none/release/syscall_ebpf" + ))?; + if let Err(e) = BpfLogger::init(&mut bpf) { + // This can happen if you remove all log statements from your eBPF program. + warn!("failed to initialize eBPF logger: {}", e); + } + let program: &mut KProbe = bpf.program_mut("syscall_ebpf").unwrap().try_into()?; + program.load()?; + program.attach("dragonos_kernel::syscall::Syscall::handle", 0)?; + + info!("Waiting for Ctrl-C..."); + signal::ctrl_c().await?; + info!("Exiting..."); + + Ok(()) +} diff --git a/user/apps/syscall_ebpf/xtask/Cargo.toml b/user/apps/syscall_ebpf/xtask/Cargo.toml new file mode 100644 index 000000000..c4dea5d16 --- /dev/null +++ b/user/apps/syscall_ebpf/xtask/Cargo.toml @@ -0,0 +1,8 @@ +[package] +name = "xtask" +version = "0.1.0" +edition = "2021" + +[dependencies] +anyhow = "1" +clap = { version = "4.1", features = ["derive"] } diff --git a/user/apps/syscall_ebpf/xtask/src/build.rs b/user/apps/syscall_ebpf/xtask/src/build.rs new file mode 100644 index 000000000..ddeee4496 --- /dev/null +++ b/user/apps/syscall_ebpf/xtask/src/build.rs @@ -0,0 +1,42 @@ +use std::process::Command; + +use anyhow::Context as _; +use clap::Parser; + +use crate::build_ebpf::{build_ebpf, Architecture, Options as BuildOptions}; + +#[derive(Debug, Parser)] +pub struct Options { + /// Set the endianness of the BPF target + #[clap(default_value = "bpfel-unknown-none", long)] + pub bpf_target: Architecture, + /// Build and run the release target + #[clap(long)] + pub release: bool, +} + +/// Build the project +fn build_project(opts: &Options) -> Result<(), anyhow::Error> { + let mut args = vec!["build"]; + if opts.release { + args.push("--release") + } + let status = Command::new("cargo") + .args(&args) + .status() + .expect("failed to build userspace"); + assert!(status.success()); + Ok(()) +} + +/// Build our ebpf program and the project +pub fn build(opts: Options) -> Result<(), anyhow::Error> { + // build our ebpf program followed by our application + build_ebpf(BuildOptions { + target: opts.bpf_target, + release: opts.release, + }) + .context("Error while building eBPF program")?; + build_project(&opts).context("Error while building userspace application")?; + Ok(()) +} \ No newline at end of file diff --git a/user/apps/syscall_ebpf/xtask/src/build_ebpf.rs b/user/apps/syscall_ebpf/xtask/src/build_ebpf.rs new file mode 100644 index 000000000..8c6e323f5 --- /dev/null +++ b/user/apps/syscall_ebpf/xtask/src/build_ebpf.rs @@ -0,0 +1,67 @@ +use std::{path::PathBuf, process::Command}; + +use clap::Parser; + +#[derive(Debug, Copy, Clone)] +pub enum Architecture { + BpfEl, + BpfEb, +} + +impl std::str::FromStr for Architecture { + type Err = String; + + fn from_str(s: &str) -> Result { + Ok(match s { + "bpfel-unknown-none" => Architecture::BpfEl, + "bpfeb-unknown-none" => Architecture::BpfEb, + _ => return Err("invalid target".to_owned()), + }) + } +} + +impl std::fmt::Display for Architecture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Architecture::BpfEl => "bpfel-unknown-none", + Architecture::BpfEb => "bpfeb-unknown-none", + }) + } +} + +#[derive(Debug, Parser)] +pub struct Options { + /// Set the endianness of the BPF target + #[clap(default_value = "bpfel-unknown-none", long)] + pub target: Architecture, + 
/// Build the release target + #[clap(long)] + pub release: bool, +} + +pub fn build_ebpf(opts: Options) -> Result<(), anyhow::Error> { + let dir = PathBuf::from("syscall_ebpf-ebpf"); + let target = format!("--target={}", opts.target); + let mut args = vec![ + "build", + target.as_str(), + "-Z", + "build-std=core", + ]; + if opts.release { + args.push("--release") + } + + // Command::new creates a child process which inherits all env variables. This means env + // vars set by the cargo xtask command are also inherited. RUSTUP_TOOLCHAIN is removed + // so the rust-toolchain.toml file in the -ebpf folder is honored. + + let status = Command::new("cargo") + .current_dir(dir) + .env_remove("RUSTUP_TOOLCHAIN") + .args(&args) + .status() + .expect("failed to build bpf program"); + assert!(status.success()); + Ok(()) +} diff --git a/user/apps/syscall_ebpf/xtask/src/main.rs b/user/apps/syscall_ebpf/xtask/src/main.rs new file mode 100644 index 000000000..507945899 --- /dev/null +++ b/user/apps/syscall_ebpf/xtask/src/main.rs @@ -0,0 +1,36 @@ +mod build_ebpf; +mod build; +mod run; + +use std::process::exit; + +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct Options { + #[clap(subcommand)] + command: Command, +} + +#[derive(Debug, Parser)] +enum Command { + BuildEbpf(build_ebpf::Options), + Build(build::Options), + Run(run::Options), +} + +fn main() { + let opts = Options::parse(); + + use Command::*; + let ret = match opts.command { + BuildEbpf(opts) => build_ebpf::build_ebpf(opts), + Run(opts) => run::run(opts), + Build(opts) => build::build(opts), + }; + + if let Err(e) = ret { + eprintln!("{e:#}"); + exit(1); + } +} diff --git a/user/apps/syscall_ebpf/xtask/src/run.rs b/user/apps/syscall_ebpf/xtask/src/run.rs new file mode 100644 index 000000000..19af11c45 --- /dev/null +++ b/user/apps/syscall_ebpf/xtask/src/run.rs @@ -0,0 +1,55 @@ +use std::process::Command; + +use anyhow::Context as _; +use clap::Parser; + +use crate::{build::{build, Options as BuildOptions}, build_ebpf::Architecture}; + +#[derive(Debug, Parser)] +pub struct Options { + /// Set the endianness of the BPF target + #[clap(default_value = "bpfel-unknown-none", long)] + pub bpf_target: Architecture, + /// Build and run the release target + #[clap(long)] + pub release: bool, + /// The command used to wrap your application + #[clap(short, long, default_value = "sudo -E")] + pub runner: String, + /// Arguments to pass to your application + #[clap(name = "args", last = true)] + pub run_args: Vec, +} + + +/// Build and run the project +pub fn run(opts: Options) -> Result<(), anyhow::Error> { + // Build our ebpf program and the project + build(BuildOptions{ + bpf_target: opts.bpf_target, + release: opts.release, + }).context("Error while building project")?; + + // profile we are building (release or debug) + let profile = if opts.release { "release" } else { "debug" }; + let bin_path = format!("target/{profile}/syscall_ebpf"); + + // arguments to pass to the application + let mut run_args: Vec<_> = opts.run_args.iter().map(String::as_str).collect(); + + // configure args + let mut args: Vec<_> = opts.runner.trim().split_terminator(' ').collect(); + args.push(bin_path.as_str()); + args.append(&mut run_args); + + // run the command + let status = Command::new(args.first().expect("No first argument")) + .args(args.iter().skip(1)) + .status() + .expect("failed to run the command"); + + if !status.success() { + anyhow::bail!("Failed to run `{}`", args.join(" ")); + } + Ok(()) +} diff --git a/user/apps/test_ebpf/.gitignore 
b/user/apps/test_ebpf/.gitignore new file mode 100644 index 000000000..1ac354611 --- /dev/null +++ b/user/apps/test_ebpf/.gitignore @@ -0,0 +1,3 @@ +/target +Cargo.lock +/install/ \ No newline at end of file diff --git a/user/apps/test_ebpf/Cargo.toml b/user/apps/test_ebpf/Cargo.toml new file mode 100644 index 000000000..7789b1616 --- /dev/null +++ b/user/apps/test_ebpf/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "test_ebpf" +version = "0.1.0" +edition = "2021" + +[dependencies] +aya = { git = "https://github.com/os-module/tiny-aya.git" } +aya-log = { git = "https://github.com/os-module/tiny-aya.git" } + +log = "0.4.22" +env_logger = "0.11.5" +tokio = { version = "1.25", features = ["macros", "rt", "rt-multi-thread", "net", "signal", "time"] } + +[profile.release] +lto = true +strip = true diff --git a/user/apps/test_ebpf/Makefile b/user/apps/test_ebpf/Makefile new file mode 100644 index 000000000..11893d806 --- /dev/null +++ b/user/apps/test_ebpf/Makefile @@ -0,0 +1,61 @@ +TOOLCHAIN="+nightly-2024-07-23-x86_64-unknown-linux-gnu" +RUSTFLAGS+="" + +ifdef DADK_CURRENT_BUILD_DIR +# When building inside dadk, install into dadk's install directory + INSTALL_DIR = $(DADK_CURRENT_BUILD_DIR) +else +# When building locally, install into ./install in the current directory + INSTALL_DIR = ./install +endif + +ifeq ($(ARCH), x86_64) + export RUST_TARGET=x86_64-unknown-linux-musl +else ifeq ($(ARCH), riscv64) + export RUST_TARGET=riscv64gc-unknown-linux-gnu +else +# Default to x86_64, used for local builds + export RUST_TARGET=x86_64-unknown-linux-musl +endif + +run: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) run --target $(RUST_TARGET) + +build:build-ebpf + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) build --target $(RUST_TARGET) + +clean:clean-ebpf + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) clean --target $(RUST_TARGET) + +test: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) test --target $(RUST_TARGET) + +doc: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) doc --target $(RUST_TARGET) + +fmt: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) fmt + +fmt-check: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) fmt --check + +run-release: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) run --target $(RUST_TARGET) --release + +build-release:build-ebpf + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) build --target $(RUST_TARGET) --release + +clean-release:clean-ebpf + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) clean --target $(RUST_TARGET) --release + +test-release: + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) test --target $(RUST_TARGET) --release + +build-ebpf: + cd ../syscall_ebpf && RUST_LOG=debug cargo xtask build --release +clean-ebpf: + cd ../syscall_ebpf && cargo clean + +.PHONY: install +install:build-ebpf + RUSTFLAGS=$(RUSTFLAGS) cargo $(TOOLCHAIN) install --target $(RUST_TARGET) --path .
--no-track --root $(INSTALL_DIR) --force diff --git a/user/apps/test_ebpf/src/main.rs b/user/apps/test_ebpf/src/main.rs new file mode 100644 index 000000000..bb278c036 --- /dev/null +++ b/user/apps/test_ebpf/src/main.rs @@ -0,0 +1,60 @@ +use aya::maps::HashMap; +use aya::programs::KProbe; +use aya::{include_bytes_aligned, Ebpf}; +use aya_log::EbpfLogger; +use log::{info, warn}; +use std::error::Error; +use tokio::task::yield_now; +use tokio::{signal, time}; + +#[tokio::main(flavor = "current_thread")] +async fn main() -> Result<(), Box> { + env_logger::builder() + .filter_level(log::LevelFilter::Warn) + .format_timestamp(None) + .init(); + + let mut bpf = Ebpf::load(include_bytes_aligned!( + "../../syscall_ebpf/target/bpfel-unknown-none/release/syscall_ebpf" + ))?; + + // create an async task to read the log + if let Err(e) = EbpfLogger::init(&mut bpf) { + // This can happen if you remove all log statements from your eBPF program. + warn!("failed to initialize eBPF logger: {}", e); + } + + let program: &mut KProbe = bpf.program_mut("syscall_ebpf").unwrap().try_into()?; + program.load()?; + program.attach("dragonos_kernel::syscall::Syscall::handle", 0)?; + + info!("attach the kprobe to dragonos_kernel::syscall::Syscall::handle"); + + // print the value of the blocklist every 5 seconds + tokio::spawn(async move { + let blocklist: HashMap<_, u32, u32> = + HashMap::try_from(bpf.map("SYSCALL_LIST").unwrap()).unwrap(); + let mut now = time::Instant::now(); + loop { + let new_now = time::Instant::now(); + let duration = new_now.duration_since(now); + if duration.as_secs() >= 5 { + println!("------------SYSCALL_LIST----------------"); + let iter = blocklist.iter(); + for item in iter { + if let Ok((key, value)) = item { + println!("syscall: {:?}, count: {:?}", key, value); + } + } + println!("----------------------------------------"); + now = new_now; + } + yield_now().await; + } + }); + + info!("Waiting for Ctrl-C..."); + signal::ctrl_c().await?; + info!("Exiting..."); + Ok(()) +} diff --git a/user/dadk/config/test_ebpf_0_1_0.dadk b/user/dadk/config/test_ebpf_0_1_0.dadk new file mode 100644 index 000000000..250952d32 --- /dev/null +++ b/user/dadk/config/test_ebpf_0_1_0.dadk @@ -0,0 +1,23 @@ +{ + "name": "test_ebpf", + "version": "0.1.0", + "description": "to test eBPF", + "task_type": { + "BuildFromSource": { + "Local": { + "path": "apps/test_ebpf" + } + } + }, + "depends": [], + "build": { + "build_command": "make install" + }, + "install": { + "in_dragonos_path": "/" + }, + "clean": { + "clean_command": "make clean" + }, + "target_arch": ["x86_64"] +} From a3e82804f5465263bdcfbcf9582c0c1472d20a86 Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Sat, 7 Sep 2024 20:26:37 +0800 Subject: [PATCH 02/10] feat: add more bpf map support --- kernel/Cargo.toml | 2 +- kernel/crates/rbpf/examples/helper.rs | 3 + kernel/crates/rbpf/src/helpers.rs | 7 + kernel/src/bpf/helper/mod.rs | 184 +++++++++++++++++++++++++- kernel/src/bpf/map/array_map.rs | 38 ++++-- kernel/src/bpf/map/hash_map.rs | 94 +++++++++++-- kernel/src/bpf/map/lru.rs | 143 ++++++++++++++++++++ kernel/src/bpf/map/mod.rs | 162 ++++++++++++++++++++--- kernel/src/bpf/map/queue.rs | 148 +++++++++++++++++++++ kernel/src/bpf/map/util.rs | 14 ++ kernel/src/bpf/mod.rs | 10 +- user/apps/syscall_ebpf/.dir-locals.el | 1 - 12 files changed, 760 insertions(+), 46 deletions(-) create mode 100644 kernel/crates/rbpf/examples/helper.rs create mode 100644 kernel/src/bpf/map/lru.rs create mode 100644 kernel/src/bpf/map/queue.rs delete mode 100644
user/apps/syscall_ebpf/.dir-locals.el diff --git a/kernel/Cargo.toml b/kernel/Cargo.toml index 8b9ee80d0..3d92c5bb0 100644 --- a/kernel/Cargo.toml +++ b/kernel/Cargo.toml @@ -62,7 +62,7 @@ log = "0.4.21" kprobe = { path = "crates/kprobe" } rbpf = { path = "crates/rbpf" , default-features = false } printf-compat = { version = "0.1.1", default-features = false } - +lru = "0.12.4" # target为x86_64时,使用下面的依赖 [target.'cfg(target_arch = "x86_64")'.dependencies] mini-backtrace = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/mini-backtrace.git", rev = "e0b1d90940" } diff --git a/kernel/crates/rbpf/examples/helper.rs b/kernel/crates/rbpf/examples/helper.rs new file mode 100644 index 000000000..ace3dfefb --- /dev/null +++ b/kernel/crates/rbpf/examples/helper.rs @@ -0,0 +1,3 @@ +fn main() { + rbpf::helpers::show_helper(); +} diff --git a/kernel/crates/rbpf/src/helpers.rs b/kernel/crates/rbpf/src/helpers.rs index 68ccb0bcf..834bf8db9 100644 --- a/kernel/crates/rbpf/src/helpers.rs +++ b/kernel/crates/rbpf/src/helpers.rs @@ -263,6 +263,13 @@ pub fn rand(min: u64, max: u64, unused3: u64, unused4: u64, unused5: u64) -> u64 }; n } +/// Prints the helper functions name and it's index. +#[cfg(feature = "std")] +pub fn show_helper() { + for (index, name) in BPF_FUNC_MAPPER.iter().enumerate() { + println!("{}:{}", index, name); + } +} /// See https://github.com/torvalds/linux/blob/master/include/uapi/linux/bpf.h pub const BPF_FUNC_MAPPER: &[&str] = &[ diff --git a/kernel/src/bpf/helper/mod.rs b/kernel/src/bpf/helper/mod.rs index fcf2732e5..8e3751a77 100644 --- a/kernel/src/bpf/helper/mod.rs +++ b/kernel/src/bpf/helper/mod.rs @@ -1,11 +1,12 @@ mod print; use crate::bpf::helper::print::trace_printf; -use crate::bpf::map::BpfMap; +use crate::bpf::map::{BpfCallBackFn, BpfMap}; use crate::bpf::map::{PerCpuInfo, PerCpuInfoImpl}; use crate::include::bindings::linux_bpf::BPF_F_CURRENT_CPU; use crate::libs::lazy_init::Lazy; use alloc::{collections::BTreeMap, sync::Arc}; use core::ffi::c_void; +use log::info; use system_error::SystemError; type RawBPFHelperFn = fn(u64, u64, u64, u64, u64) -> u64; @@ -32,7 +33,7 @@ pub unsafe fn raw_map_lookup_elem(map: *mut c_void, key: *const c_void) -> *cons } pub fn map_lookup_elem(map: &Arc, key: &[u8]) -> Result> { - let binding = map.inner_map().lock(); + let mut binding = map.inner_map().lock(); // let key_value = u32::from_ne_bytes(key[0..4].try_into().unwrap()); // log::info!(" key_value: {:?}", key_value); let value = binding.lookup_elem(key); @@ -70,7 +71,7 @@ pub fn perf_event_output( flags: u64, data: &[u8], ) -> Result<()> { - let binding = map.inner_map().lock(); + let mut binding = map.inner_map().lock(); let index = flags as u32; let flags = (flags >> 32) as u32; let key = if index == BPF_F_CURRENT_CPU as u32 { @@ -79,7 +80,7 @@ pub fn perf_event_output( } else { index }; - let fd = binding.lookup_elem(&key.to_ne_bytes()).unwrap().unwrap(); + let fd = binding.lookup_elem(&key.to_ne_bytes())?.unwrap(); let fd = u32::from_ne_bytes(fd.try_into().unwrap()); // log::info!( // ": flags: {:x?}, index: {:x?}, fd: {:x?}", @@ -146,17 +147,190 @@ pub fn map_update_elem(map: &Arc, key: &[u8], value: &[u8], flags: u64) value } +/// Delete entry with key from map. +/// +/// The delete map element helper call is used to delete values from maps. 
+pub unsafe fn raw_map_delete_elem(map: *mut c_void, key: *const c_void) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let key_size = map.key_size(); + let key = core::slice::from_raw_parts(key as *const u8, key_size); + let res = map_delete_elem(&map, key); + let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +pub fn map_delete_elem(map: &Arc, key: &[u8]) -> Result<()> { + let mut binding = map.inner_map().lock(); + let value = binding.delete_elem(key); + value +} + +/// For each element in map, call callback_fn function with map, callback_ctx and other map-specific +/// parameters. The callback_fn should be a static function and the callback_ctx should be a pointer +/// to the stack. The flags is used to control certain aspects of the helper. Currently, the flags must +/// be 0. +/// +/// The following are a list of supported map types and their respective expected callback signatures: +/// - BPF_MAP_TYPE_HASH +/// - BPF_MAP_TYPE_PERCPU_HASH +/// - BPF_MAP_TYPE_LRU_HASH +/// - BPF_MAP_TYPE_LRU_PERCPU_HASH +/// - BPF_MAP_TYPE_ARRAY +/// - BPF_MAP_TYPE_PERCPU_ARRAY +/// +/// `long (*callback_fn)(struct bpf_map *map, const void key, void *value, void *ctx);` +/// +/// For per_cpu maps, the map_value is the value on the cpu where the bpf_prog is running. +pub unsafe fn raw_map_for_each_elem( + map: *mut c_void, + cb: *const c_void, + ctx: *const c_void, + flags: u64, +) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let cb = *core::mem::transmute::<*const c_void, *const BpfCallBackFn>(cb); + let res = map_for_each_elem(&map, cb, ctx as _, flags); + let _ = Arc::into_raw(map); + match res { + Ok(v) => v as i64, + Err(e) => e as i64, + } +} + +pub fn map_for_each_elem( + map: &Arc, + cb: BpfCallBackFn, + ctx: *const u8, + flags: u64, +) -> Result { + let mut binding = map.inner_map().lock(); + let value = binding.for_each_elem(cb, ctx, flags); + value +} + +/// Perform a lookup in percpu map for an entry associated to key on cpu. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_percpu_elem/ +pub unsafe fn raw_map_lookup_percpu_elem( + map: *mut c_void, + key: *const c_void, + cpu: u32, +) -> *const c_void { + let map = Arc::from_raw(map as *const BpfMap); + let key_size = map.key_size(); + let key = core::slice::from_raw_parts(key as *const u8, key_size); + let value = map_lookup_percpu_elem(&map, key, cpu); + // warning: We need to keep the map alive, so we don't drop it here. + let _ = Arc::into_raw(map); + match value { + Ok(Some(value)) => value as *const c_void, + _ => core::ptr::null_mut(), + } +} + +pub fn map_lookup_percpu_elem( + map: &Arc, + key: &[u8], + cpu: u32, +) -> Result> { + let mut binding = map.inner_map().lock(); + let value = binding.lookup_percpu_elem(key, cpu); + match value { + Ok(Some(value)) => Ok(Some(value.as_ptr())), + _ => Ok(None), + } +} +/// Push an element value in map. 
+/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_push_elem/ +pub unsafe fn raw_map_push_elem(map: *mut c_void, value: *const c_void, flags: u64) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let value_size = map.value_size(); + let value = core::slice::from_raw_parts(value as *const u8, value_size); + let res = map_push_elem(&map, value, flags); + let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +pub fn map_push_elem(map: &Arc, value: &[u8], flags: u64) -> Result<()> { + let mut binding = map.inner_map().lock(); + let value = binding.push_elem(value, flags); + value +} + +/// Pop an element from map. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_pop_elem/ +pub unsafe fn raw_map_pop_elem(map: *mut c_void, value: *mut c_void) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let value_size = map.value_size(); + let value = core::slice::from_raw_parts_mut(value as *mut u8, value_size); + let res = map_pop_elem(&map, value); + let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +pub fn map_pop_elem(map: &Arc, value: &mut [u8]) -> Result<()> { + let mut binding = map.inner_map().lock(); + let value = binding.pop_elem(value); + value +} + +/// Get an element from map without removing it. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_peek_elem/ +pub unsafe fn raw_map_peek_elem(map: *mut c_void, value: *mut c_void) -> i64 { + let map = Arc::from_raw(map as *const BpfMap); + let value_size = map.value_size(); + let value = core::slice::from_raw_parts_mut(value as *mut u8, value_size); + let res = map_peek_elem(&map, value); + let _ = Arc::into_raw(map); + match res { + Ok(_) => 0, + Err(e) => e as i64, + } +} + +pub fn map_peek_elem(map: &Arc, value: &mut [u8]) -> Result<()> { + let mut binding = map.inner_map().lock(); + let value = binding.peek_elem(value); + value +} + pub static BPF_HELPER_FUN_SET: Lazy> = Lazy::new(); /// Initialize the helper functions. pub fn init_helper_functions() { let mut map = BTreeMap::new(); unsafe { + // Map helpers::Generic map helpers map.insert(1, define_func!(raw_map_lookup_elem)); map.insert(2, define_func!(raw_map_update_elem)); + map.insert(3, define_func!(raw_map_delete_elem)); + map.insert(164, define_func!(raw_map_for_each_elem)); + map.insert(195, define_func!(raw_map_lookup_percpu_elem)); + // map.insert(93,define_func!(raw_bpf_spin_lock); + // map.insert(94,define_func!(raw_bpf_spin_unlock); + // Map helpers::Perf event array helpers map.insert(25, define_func!(raw_perf_event_output)); - map.insert(6, define_func!(trace_printf)); + // Probe and trace helpers::Memory helpers map.insert(4, define_func!(raw_bpf_probe_read)); + // Print helpers + map.insert(6, define_func!(trace_printf)); + + // Map helpers::Queue and stack helpers + map.insert(87, define_func!(raw_map_push_elem)); + map.insert(88, define_func!(raw_map_pop_elem)); + map.insert(89, define_func!(raw_map_peek_elem)); } BPF_HELPER_FUN_SET.init(map); } diff --git a/kernel/src/bpf/map/array_map.rs b/kernel/src/bpf/map/array_map.rs index 14b747474..b783a6adc 100644 --- a/kernel/src/bpf/map/array_map.rs +++ b/kernel/src/bpf/map/array_map.rs @@ -15,6 +15,10 @@ use core::{ use log::info; use system_error::SystemError; +/// The array map type is a generic map type with no restrictions on the structure of the value. +/// Like a normal array, the array map has a numeric key starting at 0 and incrementing. 
+/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_ARRAY/ #[derive(Debug)] pub struct ArrayMap { max_entries: u32, @@ -23,6 +27,7 @@ pub struct ArrayMap { struct ArrayMapData { elem_size: u32, + /// The data is stored in a Vec with the size of elem_size * max_entries. data: Vec, } @@ -75,7 +80,7 @@ impl TryFrom<&BpfMapMeta> for ArrayMap { } impl BpfMapCommonOps for ArrayMap { - fn lookup_elem(&self, key: &[u8]) -> Result> { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { if key.len() != 4 { return Err(SystemError::EINVAL); } @@ -101,10 +106,11 @@ impl BpfMapCommonOps for ArrayMap { old_value[..value.len()].copy_from_slice(value); Ok(()) } + /// For ArrayMap, delete_elem is not supported. fn delete_elem(&mut self, _key: &[u8]) -> Result<()> { Err(SystemError::EINVAL) } - fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> Result { + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { if flags != 0 { return Err(SystemError::EINVAL); } @@ -113,7 +119,7 @@ impl BpfMapCommonOps for ArrayMap { let key = i.to_ne_bytes(); let value = self.data.index(i); total_used += 1; - let res = cb(ctx, &key, value); + let res = cb(&key, value, ctx); // return value: 0 - continue, 1 - stop and return if res != 0 { break; @@ -122,6 +128,10 @@ impl BpfMapCommonOps for ArrayMap { Ok(total_used) } + fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { + Err(SystemError::EINVAL) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { if let Some(key) = key { if key.len() != 4 { @@ -148,6 +158,9 @@ impl BpfMapCommonOps for ArrayMap { } } +/// This is the per-CPU variant of the [ArrayMap] map type. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERCPU_ARRAY/ pub struct PerCpuArrayMap { data: Vec, _phantom: core::marker::PhantomData, @@ -178,7 +191,7 @@ impl TryFrom<&BpfMapMeta> for PerCpuArrayMap { } impl BpfMapCommonOps for PerCpuArrayMap { - fn lookup_elem(&self, key: &[u8]) -> Result> { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { let cpu_id = T::cpu_id(); self.data[cpu_id as usize].lookup_elem(key) } @@ -190,10 +203,16 @@ impl BpfMapCommonOps for PerCpuArrayMap { let cpu_id = T::cpu_id(); self.data[cpu_id as usize].delete_elem(key) } - fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> Result { + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { let cpu_id = T::cpu_id(); self.data[cpu_id as usize].for_each_elem(cb, ctx, flags) } + fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { + Err(SystemError::EINVAL) + } + fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result> { + self.data[cpu as usize].lookup_elem(key) + } fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { let cpu_id = T::cpu_id(); self.data[cpu_id as usize].get_next_key(key, next_key) @@ -235,7 +254,7 @@ impl TryFrom<&BpfMapMeta> for PerfEventArrayMap { } impl BpfMapCommonOps for PerfEventArrayMap { - fn lookup_elem(&self, key: &[u8]) -> Result> { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { let cpu_id = u32::from_ne_bytes(key.try_into().unwrap()); let value = self.fds.index(cpu_id); Ok(Some(value)) @@ -252,19 +271,22 @@ impl BpfMapCommonOps for PerfEventArrayMap { self.fds.index_mut(cpu_id).copy_from_slice(&[0; 4]); Ok(()) } - fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], _flags: u64) -> Result { + fn 
for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, _flags: u64) -> Result { let mut total_used = 0; for i in 0..T::num_cpus() { let key = i.to_ne_bytes(); let value = self.fds.index(i); total_used += 1; - let res = cb(ctx, &key, value); + let res = cb(&key, value, ctx); if res != 0 { break; } } Ok(total_used) } + fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { + Err(SystemError::EINVAL) + } fn first_value_ptr(&self) -> *const u8 { self.fds.data.as_ptr() } diff --git a/kernel/src/bpf/map/hash_map.rs b/kernel/src/bpf/map/hash_map.rs index edca78afc..13a18b2ce 100644 --- a/kernel/src/bpf/map/hash_map.rs +++ b/kernel/src/bpf/map/hash_map.rs @@ -1,11 +1,16 @@ -use crate::bpf::map::util::round_up; -use crate::bpf::map::{BpfCallBackFn, BpfMapCommonOps, BpfMapMeta}; +use super::Result; +use crate::bpf::map::util::{round_up, BpfMapUpdateElemFlags}; +use crate::bpf::map::{BpfCallBackFn, BpfMapCommonOps, BpfMapMeta, PerCpuInfo}; use alloc::{collections::BTreeMap, vec::Vec}; +use core::fmt::Debug; use system_error::SystemError; - type BpfHashMapKey = Vec; type BpfHashMapValue = Vec; +/// The hash map type is a generic map type with no restrictions on the structure of the key and value. +/// Hash-maps are implemented using a hash table, allowing for lookups with arbitrary keys. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_HASH/ #[derive(Debug)] pub struct BpfHashMap { max_entries: u32, @@ -16,7 +21,7 @@ pub struct BpfHashMap { impl TryFrom<&BpfMapMeta> for BpfHashMap { type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { + fn try_from(attr: &BpfMapMeta) -> Result { if attr.value_size == 0 || attr.max_entries == 0 { return Err(SystemError::EINVAL); } @@ -31,25 +36,26 @@ impl TryFrom<&BpfMapMeta> for BpfHashMap { } impl BpfMapCommonOps for BpfHashMap { - fn lookup_elem(&self, key: &[u8]) -> super::Result> { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { let value = self.data.get(key).map(|v| v.as_slice()); Ok(value) } - fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> super::Result<()> { + fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { + let _flags = BpfMapUpdateElemFlags::from_bits_truncate(flags); self.data.insert(key.to_vec(), value.to_vec()); Ok(()) } - fn delete_elem(&mut self, key: &[u8]) -> super::Result<()> { + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { self.data.remove(key); Ok(()) } - fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: &[u8], flags: u64) -> super::Result { + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { if flags != 0 { return Err(SystemError::EINVAL); } let mut total_used = 0; for (key, value) in self.data.iter() { - let res = cb(ctx, key, value); + let res = cb(key, value, ctx); // return value: 0 - continue, 1 - stop and return if res != 0 { break; @@ -58,7 +64,17 @@ impl BpfMapCommonOps for BpfHashMap { } Ok(total_used) } - fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> crate::bpf::Result<()> { + fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { + let v = self + .data + .get(key) + .map(|v| v.as_slice()) + .ok_or(SystemError::ENOENT)?; + value.copy_from_slice(v); + self.data.remove(key); + Ok(()) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { let mut iter = self.data.iter(); if let Some(key) = key { for (k, _) in iter.by_ref() { @@ -76,10 +92,62 @@ impl BpfMapCommonOps for 
BpfHashMap { None => Err(SystemError::ENOENT), } } - fn freeze(&self) -> super::Result<()> { - Ok(()) +} + +/// This is the per-CPU variant of the [BpfHashMap] map type. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERCPU_HASH/ +pub struct PerCpuHashMap { + maps: Vec, + _phantom: core::marker::PhantomData, +} + +impl Debug for PerCpuHashMap { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PerCpuHashMap") + .field("maps", &self.maps) + .finish() + } +} +impl TryFrom<&BpfMapMeta> for PerCpuHashMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + let num_cpus = T::num_cpus(); + let mut data = Vec::with_capacity(num_cpus as usize); + for _ in 0..num_cpus { + let array_map = BpfHashMap::try_from(attr)?; + data.push(array_map); + } + Ok(PerCpuHashMap { + maps: data, + _phantom: core::marker::PhantomData, + }) + } +} + +impl BpfMapCommonOps for PerCpuHashMap { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { + self.maps[T::cpu_id() as usize].lookup_elem(key) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { + self.maps[T::cpu_id() as usize].update_elem(key, value, flags) + } + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].delete_elem(key) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { + self.maps[T::cpu_id() as usize].for_each_elem(cb, ctx, flags) + } + fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].lookup_and_delete_elem(key, value) + } + fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result> { + self.maps[cpu as usize].lookup_elem(key) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].get_next_key(key, next_key) } fn first_value_ptr(&self) -> *const u8 { - panic!("first_value_ptr for Hashmap not implemented"); + self.maps[T::cpu_id() as usize].first_value_ptr() } } diff --git a/kernel/src/bpf/map/lru.rs b/kernel/src/bpf/map/lru.rs new file mode 100644 index 000000000..5336bb840 --- /dev/null +++ b/kernel/src/bpf/map/lru.rs @@ -0,0 +1,143 @@ +use super::{BpfCallBackFn, BpfMapCommonOps, PerCpuInfo, Result}; +use crate::bpf::map::util::{round_up, BpfMapMeta}; +use crate::libs::spinlock::SpinLock; +use alloc::vec::Vec; +use core::fmt::Debug; +use core::num::NonZero; +use lru::LruCache; +use system_error::SystemError; + +type BpfHashMapKey = Vec; +type BpfHashMapValue = Vec; + +#[derive(Debug)] +pub struct LruMap { + max_entries: u32, + data: LruCache, +} + +impl TryFrom<&BpfMapMeta> for LruMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + if attr.value_size == 0 || attr.max_entries == 0 { + return Err(SystemError::EINVAL); + } + let value_size = round_up(attr.value_size as usize, 8); + Ok(Self { + max_entries: attr.max_entries, + data: LruCache::new(NonZero::new(attr.max_entries as usize).unwrap()), + }) + } +} + +impl BpfMapCommonOps for LruMap { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { + let value = self.data.get(key).map(|v| v.as_slice()); + Ok(value) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], _flags: u64) -> Result<()> { + self.data.put(key.to_vec(), value.to_vec()); + Ok(()) + } + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { + self.data.pop(key); + Ok(()) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { + if 
flags != 0 { + return Err(SystemError::EINVAL); + } + let mut total_used = 0; + for (key, value) in self.data.iter() { + let res = cb(key, value, ctx); + // return value: 0 - continue, 1 - stop and return + if res != 0 { + break; + } + total_used += 1; + } + Ok(total_used) + } + fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { + let v = self + .data + .get(key) + .map(|v| v.as_slice()) + .ok_or(SystemError::ENOENT)?; + value.copy_from_slice(v); + self.data.pop(key); + Ok(()) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { + let mut iter = self.data.iter(); + if let Some(key) = key { + for (k, _) in iter.by_ref() { + if k.as_slice() == key { + break; + } + } + } + let res = iter.next(); + match res { + Some((k, _)) => { + next_key.copy_from_slice(k.as_slice()); + Ok(()) + } + None => Err(SystemError::ENOENT), + } + } +} + +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_LRU_PERCPU_HASH/ +pub struct PerCpuLruMap { + maps: Vec, + _phantom: core::marker::PhantomData, +} + +impl Debug for PerCpuLruMap { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("PerCpuLruMap") + .field("maps", &self.maps) + .finish() + } +} + +impl TryFrom<&BpfMapMeta> for PerCpuLruMap { + type Error = SystemError; + fn try_from(attr: &BpfMapMeta) -> Result { + let num_cpus = T::num_cpus(); + let mut data = Vec::with_capacity(num_cpus as usize); + for _ in 0..num_cpus { + let array_map = LruMap::try_from(attr)?; + data.push(array_map); + } + Ok(PerCpuLruMap { + maps: data, + _phantom: core::marker::PhantomData, + }) + } +} + +impl BpfMapCommonOps for PerCpuLruMap { + fn lookup_elem(&mut self, key: &[u8]) -> Result> { + self.maps[T::cpu_id() as usize].lookup_elem(key) + } + fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { + self.maps[T::cpu_id() as usize].update_elem(key, value, flags) + } + fn delete_elem(&mut self, key: &[u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].delete_elem(key) + } + fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { + self.maps[T::cpu_id() as usize].for_each_elem(cb, ctx, flags) + } + fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].lookup_and_delete_elem(key, value) + } + fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result> { + self.maps[cpu as usize].lookup_elem(key) + } + fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { + self.maps[T::cpu_id() as usize].get_next_key(key, next_key) + } +} diff --git a/kernel/src/bpf/map/mod.rs b/kernel/src/bpf/map/mod.rs index 3d106ef70..574a71b08 100644 --- a/kernel/src/bpf/map/mod.rs +++ b/kernel/src/bpf/map/mod.rs @@ -1,9 +1,12 @@ mod array_map; mod hash_map; +mod lru; +mod queue; mod util; use super::Result; use crate::bpf::map::array_map::{ArrayMap, PerCpuArrayMap, PerfEventArrayMap}; +use crate::bpf::map::hash_map::PerCpuHashMap; use crate::bpf::map::util::{BpfMapGetNextKeyArg, BpfMapMeta, BpfMapUpdateArg}; use crate::filesystem::vfs::file::{File, FileMode}; use crate::filesystem::vfs::syscall::ModeType; @@ -31,47 +34,71 @@ pub struct BpfMap { meta: BpfMapMeta, } -type BpfCallBackFn = fn(key: &[u8], value: &[u8], ctx: &[u8]) -> i32; +pub type BpfCallBackFn = fn(key: &[u8], value: &[u8], ctx: *const u8) -> i32; pub trait BpfMapCommonOps: Send + Sync + Debug + CastFromSync { /// Lookup an element in the map. 
/// /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_elem/ - fn lookup_elem(&self, _key: &[u8]) -> Result> { - panic!("lookup_elem not implemented") + fn lookup_elem(&mut self, _key: &[u8]) -> Result> { + Err(SystemError::ENOSYS) } /// Update an element in the map. /// /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_update_elem/ fn update_elem(&mut self, _key: &[u8], _value: &[u8], _flags: u64) -> Result<()> { - panic!("update_elem not implemented") + Err(SystemError::ENOSYS) } /// Delete an element from the map. /// /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_delete_elem/ fn delete_elem(&mut self, _key: &[u8]) -> Result<()> { - panic!("delete_elem not implemented") + Err(SystemError::ENOSYS) } /// For each element in map, call callback_fn function with map, /// callback_ctx and other map-specific parameters. /// /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_for_each_map_elem/ - fn for_each_elem(&mut self, _cb: BpfCallBackFn, _ctx: &[u8], _flags: u64) -> Result { - panic!("for_each_elem not implemented") + fn for_each_elem(&mut self, _cb: BpfCallBackFn, _ctx: *const u8, _flags: u64) -> Result { + Err(SystemError::ENOSYS) + } + /// Look up an element with the given key in the map referred to by the file descriptor fd, + /// and if found, delete the element. + fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) } + /// erform a lookup in percpu map for an entry associated to key on cpu. + fn lookup_percpu_elem(&mut self, _key: &[u8], cpu: u32) -> Result> { + Err(SystemError::ENOSYS) + } /// Get the next key in the map. If key is None, get the first key. /// /// Called from syscall fn get_next_key(&self, _key: Option<&[u8]>, _next_key: &mut [u8]) -> Result<()> { - panic!("get_next_key not implemented") + Err(SystemError::ENOSYS) + } + + /// Push an element value in map. + fn push_elem(&mut self, _value: &[u8], _flags: u64) -> Result<()> { + Err(SystemError::ENOSYS) + } + + /// Pop an element value from map. + fn pop_elem(&mut self, _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + + /// Peek an element value from map. + fn peek_elem(&self, _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) } /// Freeze the map. /// /// It's useful for .rodata maps. fn freeze(&self) -> Result<()> { - panic!("freeze not implemented") + Err(SystemError::ENOSYS) } /// Get the first value pointer. @@ -184,6 +211,11 @@ impl IndexNode for BpfMap { } } +/// Create a map and return a file descriptor that refers to +/// the map. The close-on-exec file descriptor flag +/// is automatically enabled for the new file descriptor. 
+/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_CREATE/ pub fn bpf_map_create(attr: &bpf_attr) -> Result { let map_meta = BpfMapMeta::try_from(attr)?; info!("The map attr is {:#?}", map_meta); @@ -211,18 +243,41 @@ pub fn bpf_map_create(attr: &bpf_attr) -> Result { let hash_map = hash_map::BpfHashMap::try_from(&map_meta)?; Box::new(hash_map) } + bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH => { + let per_cpu_hash_map = PerCpuHashMap::::try_from(&map_meta)?; + Box::new(per_cpu_hash_map) + } + bpf_map_type::BPF_MAP_TYPE_QUEUE => { + let queue_map = queue::QueueMap::try_from(&map_meta)?; + Box::new(queue_map) + } + bpf_map_type::BPF_MAP_TYPE_STACK => { + let stack_map = queue::StackMap::try_from(&map_meta)?; + Box::new(stack_map) + } + bpf_map_type::BPF_MAP_TYPE_LRU_HASH => { + let lru_hash_map = lru::LruMap::try_from(&map_meta)?; + Box::new(lru_hash_map) + } + bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH => { + let lru_per_cpu_hash_map = lru::PerCpuLruMap::::try_from(&map_meta)?; + Box::new(lru_per_cpu_hash_map) + } _ => { unimplemented!("bpf map type {:?} not implemented", map_meta.map_type) } }; let bpf_map = BpfMap::new(map, map_meta); let fd_table = ProcessManager::current_pcb().fd_table(); - let file = File::new(Arc::new(bpf_map), FileMode::O_RDWR)?; + let file = File::new(Arc::new(bpf_map), FileMode::O_RDWR | FileMode::O_CLOEXEC)?; let fd = fd_table.write().alloc_fd(file, None).map(|x| x as usize)?; info!("create map with fd: [{}]", fd); Ok(fd) } +/// Create or update an element (key/value pair) in a specified map. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_UPDATE_ELEM/ pub fn bpf_map_update_elem(attr: &bpf_attr) -> Result { let arg = BpfMapUpdateArg::from(attr); info!(": {:#x?}", arg); @@ -250,6 +305,8 @@ pub fn bpf_map_freeze(attr: &bpf_attr) -> Result { Ok(0) } +/// Look up an element by key in a specified map and return its value. +/// /// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_LOOKUP_ELEM/ pub fn bpf_lookup_elem(attr: &bpf_attr) -> Result { let arg = BpfMapUpdateArg::from(attr); @@ -264,8 +321,8 @@ pub fn bpf_lookup_elem(attr: &bpf_attr) -> Result { let key = key_buf.read_from_user(0)?; - let inner = map.inner_map.lock(); - let r_value = inner.lookup_elem(key).unwrap(); + let mut inner = map.inner_map.lock(); + let r_value = inner.lookup_elem(key)?; if let Some(r_value) = r_value { value_buf.copy_to_user(r_value, 0)?; Ok(0) @@ -273,7 +330,12 @@ pub fn bpf_lookup_elem(attr: &bpf_attr) -> Result { Err(SystemError::ENOENT) } } - +/// Look up an element by key in a specified map and return the key of the next element. +/// +/// - If key is `None`, the operation returns zero and sets the next_key pointer to the key of the first element. +/// - If key is `Some(T)`, the operation returns zero and sets the next_key pointer to the key of the next element. +/// - If key is the last element, returns -1 and errno is set to ENOENT. 
+/// /// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_GET_NEXT_KEY/ pub fn bpf_map_get_next_key(attr: &bpf_attr) -> Result { let arg = BpfMapGetNextKeyArg::from(attr); @@ -292,10 +354,78 @@ pub fn bpf_map_get_next_key(attr: &bpf_attr) -> Result { let key = key.as_deref(); let mut next_key_buf = UserBufferWriter::new(arg.next_key as *mut u8, key_size, true)?; let inner = map.inner_map.lock(); - let mut next_key = vec![0u8; key_size]; - inner.get_next_key(key, &mut next_key)?; + let next_key = next_key_buf.buffer(0)?; + inner.get_next_key(key, next_key)?; // info!("next_key: {:?}", next_key); - next_key_buf.copy_to_user(&next_key, 0)?; + Ok(0) +} + +/// Look up and delete an element by key in a specified map. +/// +/// # WARN +/// +/// Not all map types (particularly array maps) support this operation, +/// instead a zero value can be written to the map value. Check the map types page to check for support. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_DELETE_ELEM/ +pub fn bpf_map_delete_elem(attr: &bpf_attr) -> Result { + let arg = BpfMapUpdateArg::from(attr); + // info!(": {:#x?}", arg); + let map = get_map_file(arg.map_fd as i32)?; + let meta = &map.meta; + let key_size = meta.key_size as usize; + + let key_buf = UserBufferReader::new(arg.key as *const u8, key_size, true)?; + let key = key_buf.read_from_user(0)?; + map.inner_map.lock().delete_elem(key)?; + Ok(0) +} + +/// Iterate and fetch multiple elements in a map. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_LOOKUP_BATCH/ +pub fn bpf_map_lookup_batch(_attr: &bpf_attr) -> Result { + todo!() +} + +/// Look up an element with the given key in the map referred to by the file descriptor fd, +/// and if found, delete the element. +/// +/// For BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK map types, the flags argument needs to be set to 0, +/// but for other map types, it may be specified as: +/// - BPF_F_LOCK : If this flag is set, the command will acquire the spin-lock of the map value we are looking up. +/// +/// If the map contains no spin-lock in its value, -EINVAL will be returned by the command. +/// +/// The BPF_MAP_TYPE_QUEUE and BPF_MAP_TYPE_STACK map types implement this command as a “pop” operation, +/// deleting the top element rather than one corresponding to key. +/// The key and key_len parameters should be zeroed when issuing this operation for these map types. 
+/// +/// This command is only valid for the following map types: +/// - BPF_MAP_TYPE_QUEUE +/// - BPF_MAP_TYPE_STACK +/// - BPF_MAP_TYPE_HASH +/// - BPF_MAP_TYPE_PERCPU_HASH +/// - BPF_MAP_TYPE_LRU_HASH +/// - BPF_MAP_TYPE_LRU_PERCPU_HASH +/// +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_MAP_LOOKUP_AND_DELETE_ELEM/ +pub fn bpf_map_lookup_and_delete_elem(attr: &bpf_attr) -> Result { + let arg = BpfMapUpdateArg::from(attr); + // info!(": {:#x?}", arg); + let map = get_map_file(arg.map_fd as i32)?; + let meta = &map.meta; + let key_size = meta.key_size as usize; + let value_size = meta.value_size as usize; + + let key_buf = UserBufferReader::new(arg.key as *const u8, key_size, true)?; + let mut value_buf = UserBufferWriter::new(arg.value as *mut u8, value_size, true)?; + + let value = value_buf.buffer(0)?; + let key = key_buf.read_from_user(0)?; + let mut inner = map.inner_map.lock(); + inner.lookup_and_delete_elem(key, value)?; Ok(0) } diff --git a/kernel/src/bpf/map/queue.rs b/kernel/src/bpf/map/queue.rs new file mode 100644 index 000000000..3efddd2d1 --- /dev/null +++ b/kernel/src/bpf/map/queue.rs @@ -0,0 +1,148 @@ +use super::{BpfCallBackFn, BpfMapCommonOps, Result}; +use crate::bpf::map::util::{BpfMapMeta, BpfMapUpdateElemFlags}; +use alloc::vec::Vec; +use core::fmt::{Debug, Formatter}; +use core::ops::Deref; +use core::ops::DerefMut; +use system_error::SystemError; + +type BpfQueueValue = Vec; +/// The queue map type is a generic map type, resembling a FIFO (First-In First-Out) queue. +/// +/// This map type has no keys, only values. The size and type of the values can be specified by the user +/// to fit a large variety of use cases. The typical use-case for this map type is to keep track of +/// a pool of elements such as available network ports when implementing NAT (network address translation). +/// +/// As opposed to most map types, this map type uses a custom set of helpers to pop, peek and push elements. +/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_QUEUE/ +#[derive(Debug)] +pub struct QueueMap { + max_entries: u32, + data: Vec, +} + +pub trait SpecialMap: Debug + Send + Sync + 'static { + /// Pushes a value into the map. + fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()>; + /// Removes the first element and returns it. + fn pop(&mut self) -> Option; + /// Returns the first element without removing it. + fn peek(&self) -> Option<&BpfQueueValue>; +} + +impl TryFrom<&BpfMapMeta> for QueueMap { + type Error = SystemError; + fn try_from(value: &BpfMapMeta) -> Result { + if value.value_size == 0 || value.max_entries == 0 || value.key_size != 0 { + return Err(SystemError::EINVAL); + } + let data = Vec::with_capacity(value.max_entries as usize); + Ok(Self { + max_entries: value.max_entries, + data, + }) + } +} + +impl SpecialMap for QueueMap { + fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()> { + if self.data.len() == self.max_entries as usize { + if flags.contains(BpfMapUpdateElemFlags::BPF_EXIST) { + // remove the first element + self.data.remove(0); + } else { + return Err(SystemError::ENOSPC); + } + } + self.data.push(value); + Ok(()) + } + fn pop(&mut self) -> Option { + if self.data.is_empty() { + return None; + } + Some(self.data.remove(0)) + } + fn peek(&self) -> Option<&BpfQueueValue> { + self.data.first() + } +} +/// The stack map type is a generic map type, resembling a stack data structure.
+/// +/// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_STACK/ +#[derive(Debug)] +pub struct StackMap(QueueMap); + +impl TryFrom<&BpfMapMeta> for StackMap { + type Error = SystemError; + fn try_from(value: &BpfMapMeta) -> Result { + QueueMap::try_from(value).map(StackMap) + } +} + +impl Deref for StackMap { + type Target = QueueMap; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for StackMap { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl SpecialMap for StackMap { + fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()> { + if self.data.len() == self.max_entries as usize { + if flags.contains(BpfMapUpdateElemFlags::BPF_EXIST) { + // remove the last element + self.data.pop(); + } else { + return Err(SystemError::ENOSPC); + } + } + self.data.push(value); + Ok(()) + } + fn pop(&mut self) -> Option { + self.data.pop() + } + fn peek(&self) -> Option<&BpfQueueValue> { + self.data.last() + } +} + +impl BpfMapCommonOps for T { + /// Equal to [QueueMap::peek] + fn lookup_elem(&mut self, _key: &[u8]) -> Result> { + Ok(self.peek().map(|v| v.as_slice())) + } + /// Equal to [QueueMap::push] + fn update_elem(&mut self, _key: &[u8], value: &[u8], flags: u64) -> Result<()> { + let flag = BpfMapUpdateElemFlags::from_bits_truncate(flags); + self.push(value.to_vec(), flag) + } + /// Equal to [QueueMap::pop] + fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { + if let Some(v) = self.pop() { + value.copy_from_slice(&v); + Ok(()) + } else { + Err(SystemError::ENOENT) + } + } + fn push_elem(&mut self, value: &[u8], flags: u64) -> Result<()> { + self.update_elem(&[], value, flags) + } + fn pop_elem(&mut self, value: &mut [u8]) -> Result<()> { + self.lookup_and_delete_elem(&[], value) + } + fn peek_elem(&self, value: &mut [u8]) -> Result<()> { + self.peek() + .map(|v| value.copy_from_slice(v)) + .ok_or(SystemError::ENOENT) + } +} diff --git a/kernel/src/bpf/map/util.rs b/kernel/src/bpf/map/util.rs index 3812dc59b..a4cd7af4b 100644 --- a/kernel/src/bpf/map/util.rs +++ b/kernel/src/bpf/map/util.rs @@ -84,3 +84,17 @@ impl From<&bpf_attr> for BpfMapGetNextKeyArg { pub fn round_up(x: usize, align: usize) -> usize { (x + align - 1) & !(align - 1) } + +/// flags for BPF_MAP_UPDATE_ELEM command +bitflags! 
{ + pub struct BpfMapUpdateElemFlags: u64 { + /// create new element or update existing + const BPF_ANY = 0; + /// create new element if it didn't exist + const BPF_NOEXIST = 1; + /// update existing element + const BPF_EXIST = 2; + /// spin_lock-ed map_lookup/map_update + const BPF_F_LOCK = 4; + } +} diff --git a/kernel/src/bpf/mod.rs b/kernel/src/bpf/mod.rs index 3eb9e347f..d59e5cce8 100644 --- a/kernel/src/bpf/mod.rs +++ b/kernel/src/bpf/mod.rs @@ -22,12 +22,18 @@ impl Syscall { pub fn bpf(cmd: bpf_cmd, attr: &bpf_attr) -> Result { let res = match cmd { + // Map related commands bpf_cmd::BPF_MAP_CREATE => map::bpf_map_create(attr), bpf_cmd::BPF_MAP_UPDATE_ELEM => map::bpf_map_update_elem(attr), - bpf_cmd::BPF_MAP_FREEZE => map::bpf_map_freeze(attr), - bpf_cmd::BPF_PROG_LOAD => prog::bpf_prog_load(attr), bpf_cmd::BPF_MAP_LOOKUP_ELEM => map::bpf_lookup_elem(attr), bpf_cmd::BPF_MAP_GET_NEXT_KEY => map::bpf_map_get_next_key(attr), + bpf_cmd::BPF_MAP_DELETE_ELEM => map::bpf_map_delete_elem(attr), + bpf_cmd::BPF_MAP_LOOKUP_AND_DELETE_ELEM => map::bpf_map_lookup_and_delete_elem(attr), + bpf_cmd::BPF_MAP_LOOKUP_BATCH => map::bpf_map_lookup_batch(attr), + bpf_cmd::BPF_MAP_FREEZE => map::bpf_map_freeze(attr), + // Program related commands + bpf_cmd::BPF_PROG_LOAD => prog::bpf_prog_load(attr), + // Object creation commands bpf_cmd::BPF_BTF_LOAD => { error!("bpf cmd {:?} not implemented", cmd); return Err(SystemError::ENOSYS); diff --git a/user/apps/syscall_ebpf/.dir-locals.el b/user/apps/syscall_ebpf/.dir-locals.el deleted file mode 100644 index 07f484b10..000000000 --- a/user/apps/syscall_ebpf/.dir-locals.el +++ /dev/null @@ -1 +0,0 @@ -((prog-mode . ((lsp-rust-analyzer-linked-projects . ["Cargo.toml" "syscall_ebpf-ebpf/Cargo.toml"])))) From bc9a46afeab4b5b11e4f371453a1ee5c9f6c605f Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Sat, 14 Sep 2024 11:07:19 +0800 Subject: [PATCH 03/10] remove bad code and fix fmt error --- kernel/src/mm/page.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/src/mm/page.rs b/kernel/src/mm/page.rs index 875d1cdfc..f3d900697 100644 --- a/kernel/src/mm/page.rs +++ b/kernel/src/mm/page.rs @@ -772,7 +772,7 @@ pub struct EntryFlags { phantom: PhantomData, } -impl Default for PageFlags { +impl Default for EntryFlags { fn default() -> Self { Self::new() } From 3d28969f97c4c1074322fb84a1574c70d6941c83 Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Mon, 16 Sep 2024 16:07:24 +0800 Subject: [PATCH 04/10] Re-implement mmap for perf file --- kernel/src/bpf/prog/mod.rs | 21 ++++- kernel/src/bpf/prog/verifier.rs | 5 ++ kernel/src/debug/kprobe/mod.rs | 1 + kernel/src/filesystem/vfs/file.rs | 10 +-- kernel/src/mm/fault.rs | 10 +-- kernel/src/mm/ucontext.rs | 8 +- kernel/src/perf/bpf.rs | 125 +++++++++++++++++------------- kernel/src/perf/kprobe.rs | 46 ++++++++++- kernel/src/perf/mod.rs | 65 +++++++++++++--- kernel/src/perf/util.rs | 45 ++++++++++- 10 files changed, 258 insertions(+), 78 deletions(-) diff --git a/kernel/src/bpf/prog/mod.rs b/kernel/src/bpf/prog/mod.rs index 93f2e595d..569af42fe 100644 --- a/kernel/src/bpf/prog/mod.rs +++ b/kernel/src/bpf/prog/mod.rs @@ -2,6 +2,7 @@ mod util; mod verifier; use super::Result; +use crate::bpf::map::BpfMap; use crate::bpf::prog::util::{BpfProgMeta, BpfProgVerifierInfo}; use crate::bpf::prog::verifier::BpfProgVerifier; use crate::filesystem::vfs::file::{File, FileMode}; @@ -19,11 +20,15 @@ use system_error::SystemError; #[derive(Debug)] pub struct BpfProg { meta: BpfProgMeta, + 
raw_file_ptr: Vec, } impl BpfProg { pub fn new(meta: BpfProgMeta) -> Self { - Self { meta } + Self { + meta, + raw_file_ptr: Vec::new(), + } } pub fn insns(&self) -> &[u8] { @@ -33,6 +38,10 @@ impl BpfProg { pub fn insns_mut(&mut self) -> &mut [u8] { &mut self.meta.insns } + + pub fn insert_map(&mut self, map_ptr: usize) { + self.raw_file_ptr.push(map_ptr); + } } impl IndexNode for BpfProg { @@ -88,6 +97,16 @@ impl IndexNode for BpfProg { } } +impl Drop for BpfProg { + fn drop(&mut self) { + unsafe { + for ptr in self.raw_file_ptr.iter() { + let file = Arc::from_raw(*ptr as *const u8 as *const BpfMap); + drop(file) + } + } + } +} /// Load a BPF program into the kernel. /// /// See https://ebpf-docs.dylanreimerink.nl/linux/syscall/BPF_PROG_LOAD/ diff --git a/kernel/src/bpf/prog/verifier.rs b/kernel/src/bpf/prog/verifier.rs index e00a2960a..987423bf1 100644 --- a/kernel/src/bpf/prog/verifier.rs +++ b/kernel/src/bpf/prog/verifier.rs @@ -37,6 +37,7 @@ impl<'a> BpfProgVerifier<'a> { let instructions = self.prog.insns_mut(); let mut fmt_insn = to_insn_vec(instructions); let mut index = 0; + let mut raw_file_ptr = vec![]; loop { if index >= fmt_insn.len() { break; @@ -85,6 +86,7 @@ impl<'a> BpfProgVerifier<'a> { "Relocate for BPF_PSEUDO_MAP_FD, instruction index: {}, map_fd: {}, ptr: {:#x}", index, map_fd, map_ptr ); + raw_file_ptr.push(map_ptr); Some(map_ptr) } ty => { @@ -115,6 +117,9 @@ impl<'a> BpfProgVerifier<'a> { .flat_map(|ins| ins.to_vec()) .collect::>(); instructions.copy_from_slice(&fmt_insn); + for ptr in raw_file_ptr { + self.prog.insert_map(ptr); + } Ok(()) } diff --git a/kernel/src/debug/kprobe/mod.rs b/kernel/src/debug/kprobe/mod.rs index 0ee803b29..0afdc4f90 100644 --- a/kernel/src/debug/kprobe/mod.rs +++ b/kernel/src/debug/kprobe/mod.rs @@ -8,6 +8,7 @@ use kprobe::{Kprobe, KprobeBuilder, KprobeOps, KprobePoint}; use system_error::SystemError; pub mod args; +#[cfg(feature = "kprobe_test")] mod test; pub type LockKprobe = Arc>; diff --git a/kernel/src/filesystem/vfs/file.rs b/kernel/src/filesystem/vfs/file.rs index 564b0223a..52e05322b 100644 --- a/kernel/src/filesystem/vfs/file.rs +++ b/kernel/src/filesystem/vfs/file.rs @@ -126,7 +126,7 @@ impl FileMode { /// 页面缓存 pub struct PageCache { xarray: SpinLock>>, - inode: Option>, + inode: SpinLock>>, } impl core::fmt::Debug for PageCache { @@ -149,13 +149,13 @@ impl PageCache { pub fn new(inode: Option>) -> Arc { let page_cache = Self { xarray: SpinLock::new(XArray::new()), - inode, + inode: SpinLock::new(inode), }; Arc::new(page_cache) } pub fn inode(&self) -> Option> { - self.inode.clone() + self.inode.lock().clone() } pub fn add_page(&self, offset: usize, page: &Arc) { @@ -177,8 +177,8 @@ impl PageCache { cursor.remove(); } - pub fn set_inode(&mut self, inode: Weak) { - self.inode = Some(inode) + pub fn set_inode(&self, inode: Weak) { + *self.inode.lock() = Some(inode) } } diff --git a/kernel/src/mm/fault.rs b/kernel/src/mm/fault.rs index 457b911f3..d4e08e00e 100644 --- a/kernel/src/mm/fault.rs +++ b/kernel/src/mm/fault.rs @@ -271,18 +271,18 @@ impl PageFaultHandler { /// ## 返回值 /// - VmFaultReason: 页面错误处理信息标志 pub unsafe fn do_fault(pfm: &mut PageFaultMessage) -> VmFaultReason { - if !pfm.flags().contains(FaultFlags::FAULT_FLAG_WRITE) { - return Self::do_read_fault(pfm); + return if !pfm.flags().contains(FaultFlags::FAULT_FLAG_WRITE) { + Self::do_read_fault(pfm) } else if !pfm .vma() .lock_irqsave() .vm_flags() .contains(VmFlags::VM_SHARED) { - return Self::do_cow_fault(pfm); + Self::do_cow_fault(pfm) } else { - return 
Self::do_shared_fault(pfm); - } + Self::do_shared_fault(pfm) + }; } /// 处理私有文件映射的写时复制 diff --git a/kernel/src/mm/ucontext.rs b/kernel/src/mm/ucontext.rs index 77a6ed3a7..318f92729 100644 --- a/kernel/src/mm/ucontext.rs +++ b/kernel/src/mm/ucontext.rs @@ -394,7 +394,7 @@ impl InnerAddressSpace { PageFrameCount::from_bytes(len).unwrap(), prot_flags, map_flags, - move |page, count, vm_flags, flags, mapper, flusher| { + |page, count, vm_flags, flags, mapper, flusher| { if allocate_at_once { VMA::zeroed( page, @@ -403,7 +403,7 @@ impl InnerAddressSpace { flags, mapper, flusher, - file, + file.clone(), Some(pgoff), ) } else { @@ -411,13 +411,15 @@ impl InnerAddressSpace { VirtRegion::new(page.virt_address(), count.data() * MMArch::PAGE_SIZE), vm_flags, flags, - file, + file.clone(), Some(pgoff), false, ))) } }, )?; + let file = file.unwrap(); + let _ = file.inode().mmap(start_vaddr.data(), len, offset); return Ok(start_page); } diff --git a/kernel/src/perf/bpf.rs b/kernel/src/perf/bpf.rs index 485996970..5f5ad7cb7 100644 --- a/kernel/src/perf/bpf.rs +++ b/kernel/src/perf/bpf.rs @@ -1,13 +1,22 @@ use super::{PerfEventOps, Result}; +use crate::arch::mm::LockedFrameAllocator; use crate::arch::MMArch; +use crate::filesystem::vfs::file::PageCache; +use crate::filesystem::vfs::{FilePrivateData, FileSystem, IndexNode}; use crate::include::bindings::linux_bpf::{ perf_event_header, perf_event_mmap_page, perf_event_type, }; -use crate::libs::spinlock::SpinLock; +use crate::libs::spinlock::{SpinLock, SpinLockGuard}; +use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PhysPageFrame}; +use crate::mm::page::{page_manager_lock_irqsave, Page, PageFlushAll}; use crate::mm::MemoryManagementArch; -use crate::perf::util::PerfProbeArgs; +use crate::perf::util::{LostSamples, PerfProbeArgs, PerfSample, SampleHeader}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::any::Any; use core::fmt::Debug; - +use system_error::SystemError; const PAGE_SIZE: usize = MMArch::PAGE_SIZE; #[derive(Debug)] pub struct BpfPerfEvent { @@ -19,52 +28,10 @@ pub struct BpfPerfEvent { pub struct BpfPerfEventData { enabled: bool, mmap_page: RingPage, + page_cache: Arc, offset: usize, } -/// The event type in our particular use case will be `PERF_RECORD_SAMPLE` or `PERF_RECORD_LOST`. -/// `PERF_RECORD_SAMPLE` indicating that there is an actual sample after this header. -/// And `PERF_RECORD_LOST` indicating that there is a record lost header following the perf event header. -#[repr(C)] -#[derive(Debug)] -struct LostSamples { - header: perf_event_header, - id: u64, - count: u64, -} - -impl LostSamples { - fn as_bytes(&self) -> &[u8] { - unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } - } -} - -#[repr(C)] -#[derive(Debug)] -struct Sample { - header: perf_event_header, - size: u32, -} - -impl Sample { - fn as_bytes(&self) -> &[u8] { - unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } - } -} - -#[repr(C)] -#[derive(Debug)] -struct PerfSample<'a> { - s_hdr: Sample, - value: &'a [u8], -} - -impl<'a> PerfSample<'a> { - fn calculate_size(value_size: usize) -> usize { - size_of::() + value_size - } -} - #[derive(Debug)] pub struct RingPage { size: usize, @@ -203,11 +170,11 @@ impl RingPage { /// Write a sample to the page. 
fn write_sample(&mut self, data: &[u8], data_head: usize) -> Result { let perf_sample = PerfSample { - s_hdr: Sample { + s_hdr: SampleHeader { header: perf_event_header { type_: perf_event_type::PERF_RECORD_SAMPLE as u32, misc: 0, - size: size_of::() as u16 + data.len() as u16, + size: size_of::() as u16 + data.len() as u16, }, size: data.len() as u32, }, @@ -253,13 +220,29 @@ impl BpfPerfEvent { data: SpinLock::new(BpfPerfEventData { enabled: false, mmap_page: RingPage::empty(), + page_cache: PageCache::new(None), offset: 0, }), } } pub fn do_mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> { let mut data = self.data.lock(); - let mmap_page = RingPage::new_init(start, len); + // alloc page frame + let (phy_addr, page_count) = + unsafe { LockedFrameAllocator.allocate(PageFrameCount::new(len / PAGE_SIZE)) } + .ok_or(SystemError::ENOSPC)?; + let mut page_manager_guard = page_manager_lock_irqsave(); + let mut cur_phys = PhysPageFrame::new(phy_addr); + for i in 0..page_count.data() { + let page = Arc::new(Page::new(true, cur_phys.phys_address())); + let paddr = cur_phys.phys_address(); + page_manager_guard.insert(paddr, &page); + data.page_cache.add_page(i, &page); + cur_phys = cur_phys.next(); + } + let virt_addr = unsafe { MMArch::phys_2_virt(phy_addr) }.unwrap(); + // create mmap page + let mmap_page = RingPage::new_init(virt_addr.data(), len); data.mmap_page = mmap_page; data.offset = offset; Ok(()) @@ -267,14 +250,53 @@ impl BpfPerfEvent { pub fn write_event(&self, data: &[u8]) -> Result<()> { let mut inner_data = self.data.lock(); - inner_data.mmap_page.write_event(data) + inner_data.mmap_page.write_event(data); + Ok(()) } } -impl PerfEventOps for BpfPerfEvent { +impl IndexNode for BpfPerfEvent { fn mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> { self.do_mmap(start, len, offset) } + + fn read_at( + &self, + offset: usize, + len: usize, + buf: &mut [u8], + _data: SpinLockGuard, + ) -> Result { + panic!("PerfEventInode does not support read") + } + + fn write_at( + &self, + offset: usize, + len: usize, + buf: &[u8], + _data: SpinLockGuard, + ) -> Result { + panic!("PerfEventInode does not support write") + } + + fn fs(&self) -> Arc { + panic!("PerfEventInode does not have a filesystem") + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + fn list(&self) -> Result> { + Err(SystemError::ENOSYS) + } + + fn page_cache(&self) -> Option> { + Some(self.data.lock().page_cache.clone()) + } +} + +impl PerfEventOps for BpfPerfEvent { fn enable(&self) -> Result<()> { self.data.lock().enabled = true; Ok(()) @@ -284,7 +306,6 @@ impl PerfEventOps for BpfPerfEvent { Ok(()) } fn readable(&self) -> bool { - // false self.data.lock().mmap_page.readable() } } diff --git a/kernel/src/perf/kprobe.rs b/kernel/src/perf/kprobe.rs index 186ea7b80..0d88047df 100644 --- a/kernel/src/perf/kprobe.rs +++ b/kernel/src/perf/kprobe.rs @@ -5,17 +5,21 @@ use crate::bpf::helper::BPF_HELPER_FUN_SET; use crate::bpf::prog::BpfProg; use crate::debug::kprobe::args::KprobeInfo; use crate::debug::kprobe::{register_kprobe, unregister_kprobe, LockKprobe}; -use crate::filesystem::vfs::file::File; +use crate::filesystem::vfs::file::{File, PageCache}; +use crate::filesystem::vfs::{FilePrivateData, FileSystem, IndexNode}; use crate::libs::casting::DowncastArc; +use crate::libs::spinlock::SpinLockGuard; use crate::perf::util::PerfProbeArgs; use crate::perf::PerfEventOps; use alloc::boxed::Box; +use alloc::string::String; use alloc::sync::Arc; +use alloc::vec::Vec; +use core::any::Any; use 
core::fmt::Debug; use kprobe::{CallBackFunc, ProbeArgs}; use rbpf::EbpfVmRawOwned; use system_error::SystemError; - #[derive(Debug)] pub struct KprobePerfEvent { args: PerfProbeArgs, @@ -74,6 +78,44 @@ impl CallBackFunc for KprobePerfCallBack { } } +impl IndexNode for KprobePerfEvent { + fn read_at( + &self, + _offset: usize, + _len: usize, + buf: &mut [u8], + _data: SpinLockGuard, + ) -> Result { + panic!("read_at not implemented for PerfEvent"); + } + + fn write_at( + &self, + _offset: usize, + _len: usize, + _buf: &[u8], + _data: SpinLockGuard, + ) -> Result { + panic!("write_at not implemented for PerfEvent"); + } + + fn fs(&self) -> Arc { + panic!("fs not implemented for PerfEvent"); + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + + fn list(&self) -> Result> { + Err(SystemError::ENOSYS) + } + + fn page_cache(&self) -> Option> { + None + } +} + impl PerfEventOps for KprobePerfEvent { fn set_bpf_prog(&self, bpf_prog: Arc) -> Result<()> { self.do_set_bpf_prog(bpf_prog) diff --git a/kernel/src/perf/mod.rs b/kernel/src/perf/mod.rs index aafbfe89f..ef1e79bf2 100644 --- a/kernel/src/perf/mod.rs +++ b/kernel/src/perf/mod.rs @@ -3,14 +3,18 @@ mod bpf; mod kprobe; mod util; -use crate::filesystem::vfs::file::{File, FileMode}; +use crate::filesystem::vfs::file::{File, FileMode, PageCache}; use crate::filesystem::vfs::syscall::ModeType; -use crate::filesystem::vfs::{FilePrivateData, FileSystem, FileType, IndexNode, Metadata}; +use crate::filesystem::vfs::{ + FilePrivateData, FileSystem, FileType, FsInfo, IndexNode, Metadata, SuperBlock, +}; use crate::include::bindings::linux_bpf::{ perf_event_attr, perf_event_sample_format, perf_sw_ids, perf_type_id, }; use crate::libs::casting::DowncastArc; use crate::libs::spinlock::{SpinLock, SpinLockGuard}; +use crate::mm::fault::{PageFaultHandler, PageFaultMessage}; +use crate::mm::VmFaultReason; use crate::net::event_poll::{EPollEventType, EPollItem, EventPoll, KernelIoctlData}; use crate::perf::bpf::BpfPerfEvent; use crate::perf::util::{PerfEventIoc, PerfEventOpenFlags, PerfProbeArgs}; @@ -33,10 +37,7 @@ use system_error::SystemError; type Result = core::result::Result; -pub trait PerfEventOps: Send + Sync + Debug + CastFromSync + CastFrom { - fn mmap(&self, _start: usize, _len: usize, _offset: usize) -> Result<()> { - panic!("mmap not implemented for PerfEvent"); - } +pub trait PerfEventOps: Send + Sync + Debug + CastFromSync + CastFrom + IndexNode { fn set_bpf_prog(&self, _bpf_prog: Arc) -> Result<()> { panic!("set_bpf_prog not implemented for PerfEvent"); } @@ -188,7 +189,8 @@ impl IndexNode for PerfEventInode { } fn fs(&self) -> Arc { - panic!("PerfEvent does not have a filesystem") + // panic!("PerfEvent does not have a filesystem") + Arc::new(PerfFakeFs) } fn as_any_ref(&self) -> &dyn Any { self @@ -196,6 +198,46 @@ impl IndexNode for PerfEventInode { fn list(&self) -> Result> { Err(SystemError::ENOSYS) } + fn page_cache(&self) -> Option> { + self.event.page_cache() + } +} + +#[derive(Debug)] +struct PerfFakeFs; + +impl FileSystem for PerfFakeFs { + fn root_inode(&self) -> Arc { + panic!("PerfFakeFs does not have a root inode") + } + + fn info(&self) -> FsInfo { + panic!("PerfFakeFs does not have a filesystem info") + } + + fn as_any_ref(&self) -> &dyn Any { + self + } + + fn name(&self) -> &str { + "perf" + } + + fn super_block(&self) -> SuperBlock { + panic!("PerfFakeFs does not have a super block") + } + unsafe fn fault(&self, pfm: &mut PageFaultMessage) -> VmFaultReason { + let res = PageFaultHandler::filemap_fault(pfm); + res + } + 
unsafe fn map_pages( + &self, + pfm: &mut PageFaultMessage, + start_pgoff: usize, + end_pgoff: usize, + ) -> VmFaultReason { + PageFaultHandler::filemap_map_pages(pfm, start_pgoff, end_pgoff) + } } impl Syscall { @@ -255,8 +297,13 @@ pub fn perf_event_open( unimplemented!("perf_event_process: unknown type: {:?}", args); } }; - let perf_event = PerfEventInode::new(event); - let file = File::new(Arc::new(perf_event), file_mode)?; + + let page_cache = event.page_cache(); + let perf_event = Arc::new(PerfEventInode::new(event)); + if let Some(cache) = page_cache { + cache.set_inode(Arc::downgrade(&(perf_event.clone() as _))); + } + let file = File::new(perf_event, file_mode)?; let fd_table = ProcessManager::current_pcb().fd_table(); let fd = fd_table.write().alloc_fd(file, None).map(|x| x as usize)?; Ok(fd) diff --git a/kernel/src/perf/util.rs b/kernel/src/perf/util.rs index d7c4541fa..e2538e6dc 100644 --- a/kernel/src/perf/util.rs +++ b/kernel/src/perf/util.rs @@ -1,5 +1,5 @@ use crate::include::bindings::linux_bpf::{ - perf_event_attr, perf_event_sample_format, perf_sw_ids, perf_type_id, + perf_event_attr, perf_event_header, perf_event_sample_format, perf_sw_ids, perf_type_id, }; use crate::syscall::user_access::check_and_clone_cstr; use alloc::string::String; @@ -69,3 +69,46 @@ impl PerfProbeArgs { Ok(args) } } + +/// The event type in our particular use case will be `PERF_RECORD_SAMPLE` or `PERF_RECORD_LOST`. +/// `PERF_RECORD_SAMPLE` indicating that there is an actual sample after this header. +/// And `PERF_RECORD_LOST` indicating that there is a record lost header following the perf event header. +#[repr(C)] +#[derive(Debug)] +pub struct LostSamples { + pub header: perf_event_header, + pub id: u64, + pub count: u64, +} + +impl LostSamples { + pub fn as_bytes(&self) -> &[u8] { + unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct SampleHeader { + pub header: perf_event_header, + pub size: u32, +} + +impl SampleHeader { + pub fn as_bytes(&self) -> &[u8] { + unsafe { core::slice::from_raw_parts(self as *const Self as *const u8, size_of::()) } + } +} + +#[repr(C)] +#[derive(Debug)] +pub struct PerfSample<'a> { + pub s_hdr: SampleHeader, + pub value: &'a [u8], +} + +impl<'a> PerfSample<'a> { + pub fn calculate_size(value_size: usize) -> usize { + size_of::() + value_size + } +} From 5a2c426b9cf0937182bc0f108708d454717bdd5f Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Thu, 19 Sep 2024 16:50:01 +0800 Subject: [PATCH 05/10] update doc --- docs/index.rst | 1 + docs/kernel/trace/eBPF.md | 320 ++++++++++++++++++++++++++++++++++++ docs/kernel/trace/img.png | Bin 0 -> 47130 bytes docs/kernel/trace/img_1.png | Bin 0 -> 66484 bytes docs/kernel/trace/index.rst | 11 ++ docs/kernel/trace/kprobe.md | 100 +++++++++++ 6 files changed, 432 insertions(+) create mode 100644 docs/kernel/trace/eBPF.md create mode 100644 docs/kernel/trace/img.png create mode 100644 docs/kernel/trace/img_1.png create mode 100644 docs/kernel/trace/index.rst create mode 100644 docs/kernel/trace/kprobe.md diff --git a/docs/index.rst b/docs/index.rst index be25163df..5d2aa82b3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ kernel/ktest/index kernel/cpu_arch/index kernel/libs/index + kernel/trace/index .. 
toctree:: diff --git a/docs/kernel/trace/eBPF.md b/docs/kernel/trace/eBPF.md new file mode 100644 index 000000000..00c37984f --- /dev/null +++ b/docs/kernel/trace/eBPF.md @@ -0,0 +1,320 @@ +# eBPF + +## 概述 + +eBPF 是一项革命性的技术,起源于 Linux 内核,它可以在特权上下文中(如操作系统内核)运行沙盒程序。它用于安全有效地扩展内核的功能,而无需通过更改内核源代码或加载内核模块的方式来实现。 + +从历史上看,由于内核具有监督和控制整个系统的特权,操作系统一直是实现可观测性、安全性和网络功能的理想场所。同时,由于操作系统内核的核心地位和对稳定性和安全性的高要求,操作系统内核很难快速迭代发展。因此在传统意义上,与在操作系统本身之外实现的功能相比,操作系统级别的创新速度要慢一些。 + +eBPF 从根本上改变了这个方式。通过允许在操作系统中运行沙盒程序的方式,应用程序开发人员可以运行 eBPF 程序,以便在运行时向操作系统添加额外的功能。然后在 JIT 编译器和验证引擎的帮助下,操作系统确保它像本地编译的程序一样具备安全性和执行效率。这引发了一股基于 eBPF 的项目热潮,它们涵盖了广泛的用例,包括下一代网络实现、可观测性和安全功能等领域。 + +## eBPF In DragonOS + +在一个新的OS上添加eBPF的支持需要了解eBPF的运行过程,通常,eBPF需要用户态工具和内核相关基础设施配合才能发挥其功能。而新的OS通常会兼容Linux上的应用程序,这可以进一步简化对用户态工具的移植工作,只要内核实现相关的系统调用和功能,就可以配合已有的工具完成eBPF的支持。 + +## eBPF的运行流程 + +![image-20240909165945192](./img_1.png) + +如图所示,eBPF程序的运行过程分为三个主要步骤: + +1. 源代码->二进制 + 1. 用户可以使用python/C/Rust编写eBPF程序,并使用相关的工具链编译源代码到二进制程序 + 2. 这个步骤中,用户需要合理使用helper函数丰富eBPF程序功能 +2. 加载eBPF程序 + 1. 用户态的工具库会封装内核提供的系统调用接口,以简化用户的工作。用户态工具对eBPF程序经过预处理后发出系统调用,请求内核加载eBPF程序。 + 1. 内核首先会对eBPF程序进行验证,检查程序的正确性和合法性,同时也会对程序做进一步的处理 + 1. 内核会根据用户请求,将eBPF程序附加到内核的挂载点上(kprobe/uprobe/trace_point) + 1. 在内核运行期间,当这些挂载点被特定的事件触发, eBPF程序就会被执行 +3. 数据交互 + 1. eBPF程序可以收集内核的信息,用户工具可以选择性的获取这些信息 + 2. eBPF程序可以直接将信息输出到文件中,用户工具通过读取和解析文件中的内容拿到信息 + 3. eBPF程序通过Map在内核和用户态之间共享和交换数据 + + + +## 用户态支持 + +用户态的eBPF工具库有很多,比如C的libbpf,python的bcc, Rust的Aya,总体来说,这些工具的处理流程都大致相同。DragonOS当前支持[Aya](https://github.com/aya-rs/aya)框架编写的eBPF程序,以Aya为例,用户态的工具的处理过程如下: + +1. 提供eBPF使用的helper函数和Map抽象,方便实现eBPF程序 +2. 处理编译出来的eBPF程序,调用系统调用创建Map,获得对应的文件描述符 +3. 根据需要,更新Map的值(.data) +4. 根据重定位信息,对eBPF程序的相关指令做修改 +5. 根据内核版本,对eBPF程序中的bpf to bpf call进行处理 +6. 加载eBPF程序到内核中 +7. 对系统调用封装,提供大量的函数帮助访问eBPF的信息并与内核交互 + +DragonOS对Aya 库的支持并不完整。通过对Aya库的删减,我们实现了一个较小的[tiny-aya](https://github.com/DragonOS-Community/tiny-aya)。为了确保后期对Aya的兼容,tiny-aya只对Aya中的核心工具aya做了修改**,其中一些函数被禁用,因为这些函数的所需的系统调用或者文件在DragonOS中还未实现**。 + +### Tokio + +Aya需要使用异步运行时,通过增加一些系统调用和修复一些错误DragonOS现在已经支持基本的tokio运行时。 + +### 使用Aya创建eBPF程序 + +与Aya官方提供的[文档](https://aya-rs.dev/book/start/development/)所述,只需要根据其流程安装对应的Rust工具链,就可以按照模板创建eBPF项目。以当前实现的`syscall_ebf`为例,这个程序的功能是统计系统调用的次数,并将其存储在一个HashMap中。 + +``` +├── Cargo.toml +├── README.md +├── syscall_ebpf +├── syscall_ebpf-common +├── syscall_ebpf-ebpf +└── xtask +``` + +在user/app目录中,项目结构如上所示: + +- `syscall_ebpf-ebpf`是 eBPF代码的实现目录,其会被编译到字节码 +- `syscall_ebpf-common` 是公共库,方便内核和用户态进行信息交互 +- `syscall_ebpf` 是用户态程序,其负责加载eBPF程序并获取eBPF程序产生的数据 +- `xtask` 是一个命令行工具,方便用户编译和运行用户态程序 + +为了在DragonOS中运行用户态程序,暂时还不能直接使用模板创建的项目: + +1. 这个项目不符合DragonOS对用户程序的项目结构要求,当然这可以通过稍加修改完成 +2. 因为DragonOS对tokio运行时的支持还不是完整体,需要稍微修改一下使用方式 + +``` +#[tokio::main(flavor = "current_thread")] +async fn main() -> Result<(), Box> { +``` + +3. 因为对Aya支持不是完整体,因此项目依赖的aya和aya-log需要换成tiny-aya中的实现。 + +``` +[dependencies] +aya = { git = "https://github.com/os-module/tiny-aya.git" } +aya-log = { git = "https://github.com/os-module/tiny-aya.git" } +``` + +只需要稍加修改,就可以利用Aya现有的工具完成eBPF程序的实现。 + +## 内核态支持 + +内核态支持主要为三个部分: + +1. kprobe实现:位于目录`kernel/crates/kprobe` +2. rbpf运行时:位于目录`kernel/crates/rbpf` +3. 系统调用支持 +4. helper函数支持 + +### rbpf + +由于rbpf之前只是用于运行一些简单的eBPF程序,其需要通过一些修改才能运行更复杂的程序。 + +1. 增加bpf to bpf call 的支持:通过增加新的栈抽象和保存和恢复必要的寄存器数据 +2. 关闭内部不必要的内存检查,这通常由内核的验证器完成 +3. 
增加带所有权的数据结构避免生命周期的限制 + + + +### 系统调用 + +eBPF相关的系统调用都集中在`bpf()` 上,通过参数cmd来进一步区分功能,目前对其支持如下: + +```rust +pub fn bpf(cmd: bpf_cmd, attr: &bpf_attr) -> Result { + let res = match cmd { + // Map related commands + bpf_cmd::BPF_MAP_CREATE => map::bpf_map_create(attr), + bpf_cmd::BPF_MAP_UPDATE_ELEM => map::bpf_map_update_elem(attr), + bpf_cmd::BPF_MAP_LOOKUP_ELEM => map::bpf_lookup_elem(attr), + bpf_cmd::BPF_MAP_GET_NEXT_KEY => map::bpf_map_get_next_key(attr), + bpf_cmd::BPF_MAP_DELETE_ELEM => map::bpf_map_delete_elem(attr), + bpf_cmd::BPF_MAP_LOOKUP_AND_DELETE_ELEM => map::bpf_map_lookup_and_delete_elem(attr), + bpf_cmd::BPF_MAP_LOOKUP_BATCH => map::bpf_map_lookup_batch(attr), + bpf_cmd::BPF_MAP_FREEZE => map::bpf_map_freeze(attr), + // Program related commands + bpf_cmd::BPF_PROG_LOAD => prog::bpf_prog_load(attr), + // Object creation commands + bpf_cmd::BPF_BTF_LOAD => { + error!("bpf cmd {:?} not implemented", cmd); + return Err(SystemError::ENOSYS); + } + ty => { + unimplemented!("bpf cmd {:?} not implemented", ty) + } + }; + res +} +``` + +其中对创建Map命令会再次细分,以确定具体的Map类型,目前我们对通用的Map基本添加了支持: + +```rust +bpf_map_type::BPF_MAP_TYPE_ARRAY +bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY +bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY +bpf_map_type::BPF_MAP_TYPE_HASH +bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH +bpf_map_type::BPF_MAP_TYPE_QUEUE +bpf_map_type::BPF_MAP_TYPE_STACK +bpf_map_type::BPF_MAP_TYPE_LRU_HASH +bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH + +bpf_map_type::BPF_MAP_TYPE_CPUMAP +| bpf_map_type::BPF_MAP_TYPE_DEVMAP +| bpf_map_type::BPF_MAP_TYPE_DEVMAP_HASH => { + error!("bpf map type {:?} not implemented", map_meta.map_type); + Err(SystemError::EINVAL)? +} +``` + +所有的Map都会实现定义好的接口,这个接口参考Linux的实现定义: + +```rust +pub trait BpfMapCommonOps: Send + Sync + Debug + CastFromSync { + /// Lookup an element in the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_lookup_elem/ + fn lookup_elem(&mut self, _key: &[u8]) -> Result> { + Err(SystemError::ENOSYS) + } + /// Update an element in the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_update_elem/ + fn update_elem(&mut self, _key: &[u8], _value: &[u8], _flags: u64) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Delete an element from the map. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_map_delete_elem/ + fn delete_elem(&mut self, _key: &[u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// For each element in map, call callback_fn function with map, + /// callback_ctx and other map-specific parameters. + /// + /// See https://ebpf-docs.dylanreimerink.nl/linux/helper-function/bpf_for_each_map_elem/ + fn for_each_elem(&mut self, _cb: BpfCallBackFn, _ctx: *const u8, _flags: u64) -> Result { + Err(SystemError::ENOSYS) + } + /// Look up an element with the given key in the map referred to by the file descriptor fd, + /// and if found, delete the element. + fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// perform a lookup in percpu map for an entry associated to key on cpu. + fn lookup_percpu_elem(&mut self, _key: &[u8], cpu: u32) -> Result> { + Err(SystemError::ENOSYS) + } + /// Get the next key in the map. If key is None, get the first key. + /// + /// Called from syscall + fn get_next_key(&self, _key: Option<&[u8]>, _next_key: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Push an element value in map. 
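+    /// Implemented by the queue and stack map types; see the queue/stack helpers (87-89) in the helper table below.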
+ fn push_elem(&mut self, _value: &[u8], _flags: u64) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Pop an element value from map. + fn pop_elem(&mut self, _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Peek an element value from map. + fn peek_elem(&self, _value: &mut [u8]) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Freeze the map. + /// + /// It's useful for .rodata maps. + fn freeze(&self) -> Result<()> { + Err(SystemError::ENOSYS) + } + /// Get the first value pointer. + fn first_value_ptr(&self) -> *const u8 { + panic!("value_ptr not implemented") + } +} +``` + +联通eBPF和kprobe的系统调用是[`perf_event_open`](https://man7.org/linux/man-pages/man2/perf_event_open.2.html),这个系统调用在Linux中非常复杂,因此Dragon中并没有按照Linux进行实现,目前只支持其中两个功能: + + + +```rust +match args.type_ { + // Kprobe + // See /sys/bus/event_source/devices/kprobe/type + perf_type_id::PERF_TYPE_MAX => { + let kprobe_event = kprobe::perf_event_open_kprobe(args); + Box::new(kprobe_event) + } + perf_type_id::PERF_TYPE_SOFTWARE => { + // For bpf prog output + assert_eq!(args.config, perf_sw_ids::PERF_COUNT_SW_BPF_OUTPUT); + assert_eq!( + args.sample_type, + Some(perf_event_sample_format::PERF_SAMPLE_RAW) + ); + let bpf_event = bpf::perf_event_open_bpf(args); + Box::new(bpf_event) + } +} +``` + +- 其中一个`PERF_TYPE_SOFTWARE`是用来创建软件定义的事件,`PERF_COUNT_SW_BPF_OUTPUT` 确保这个事件用来采集bpf的输出。 +- `PERF_TYPE_MAX` 通常指示创建kprobe/uprobe事件,也就是用户程序使用kprobe的途径之一,用户程序可以将eBPF程序绑定在这个事件上 + +同样的,perf不同的事件也实现定义的接口: + +```rust +pub trait PerfEventOps: Send + Sync + Debug + CastFromSync + CastFrom { + fn mmap(&self, _start: usize, _len: usize, _offset: usize) -> Result<()> { + panic!("mmap not implemented for PerfEvent"); + } + fn set_bpf_prog(&self, _bpf_prog: Arc) -> Result<()> { + panic!("set_bpf_prog not implemented for PerfEvent"); + } + fn enable(&self) -> Result<()> { + panic!("enable not implemented"); + } + fn disable(&self) -> Result<()> { + panic!("disable not implemented"); + } + fn readable(&self) -> bool { + panic!("readable not implemented"); + } +} +``` + +这个接口目前并不稳定。 + +### helper函数支持 + +用户态工具通过系统调用和内核进行通信,完成eBPF数据的设置、交换。在内核中,eBPF程序的运行也需要内核的帮助,单独的eBPF程序并没有什么太大的用处,因此其会调用内核提供的`helper` 函数完成对内核资源的访问。 + +目前已经支持的大多数`helper` 函数是与Map操作相关: + +```rust +/// Initialize the helper functions. 
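+///
+/// The numeric keys are the helper IDs that eBPF bytecode uses to call helpers by number:
+/// 1/2/3 = map lookup/update/delete, 4 = probe_read, 6 = trace_printf, 25 = perf_event_output,
+/// 87/88/89 = queue/stack push/pop/peek, 164 = for_each_map_elem, 195 = lookup_percpu_elem.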
+pub fn init_helper_functions() { + let mut map = BTreeMap::new(); + unsafe { + // Map helpers::Generic map helpers + map.insert(1, define_func!(raw_map_lookup_elem)); + map.insert(2, define_func!(raw_map_update_elem)); + map.insert(3, define_func!(raw_map_delete_elem)); + map.insert(164, define_func!(raw_map_for_each_elem)); + map.insert(195, define_func!(raw_map_lookup_percpu_elem)); + // map.insert(93,define_func!(raw_bpf_spin_lock); + // map.insert(94,define_func!(raw_bpf_spin_unlock); + // Map helpers::Perf event array helpers + map.insert(25, define_func!(raw_perf_event_output)); + // Probe and trace helpers::Memory helpers + map.insert(4, define_func!(raw_bpf_probe_read)); + // Print helpers + map.insert(6, define_func!(trace_printf)); + + // Map helpers::Queue and stack helpers + map.insert(87, define_func!(raw_map_push_elem)); + map.insert(88, define_func!(raw_map_pop_elem)); + map.insert(89, define_func!(raw_map_peek_elem)); + } + BPF_HELPER_FUN_SET.init(map); +} +``` + diff --git a/docs/kernel/trace/img.png b/docs/kernel/trace/img.png new file mode 100644 index 0000000000000000000000000000000000000000..884ec42f97a8c7e88885bfa9693a59c1840485ec GIT binary patch literal 47130 zcmbTeby$>L+dfLD2na}tC?VY-gGi?!HGq^fh%g{h($XRV64DF|AxejIx1!|G10vGh z-M!b~^SwUr_uI$b``G`Da^Lr=tIzXVK~GfV2=MOUVPIeoD9FpGV_;yrVqjotUdICe zBSa%_gMsk|LqSGL^R>}xGOj0??D0YAM#F`np_IQi*%kDp>kl)qQW9ksrHwomw7NMM}kEsDcD31IR~M_4b+vq+V?|tM?BF&LqBJEeHY3s z#{Yfe#7*3jd2mo`5T$`K{wU5&PXvYsLrao=zIFKvy{Becm5f>i|%fe2N#6*KvZOx}EhkHZD+J#2-X6+=- zm>8e0F%yCZ9q)HTmU_!=Mk*Fke8lPLpU<>>Se|~fJ!04BwbLcQrC*WW@cjAnfo6<% zgz+E{i_Za_t`QB$UL?9t`u5n{SjUauhX=~U* zcQWaRFS@1k^NDuzvS2AET(s`rm<~oAdp?SJWZwa^vi|2?I#Yy$ifoo~qxZ$}P-7Lb zyOfWE8(0((m0%IrjbL&P2V7e5omWLyOs-t+6gDpsva`y=lPKiKOwn($5yI2(iGiM4 zYN<`T#%byOsf?1ldrh|I?qXj8zm>xoA+_)Wo8K|U0+#)=r$;-fcer`5O|UP>|_E zta~*j9de7S>Q!evZ<0nkvhnv9t-PlO;?B#Oeb=?#hlR~MU{b?kYw6Jw`1_iu+qPM< zuyfUYUS72VT1v|HIcgD?)1!qH*&`x})2}@{q@j2Cm$&DT{keiVZ#OYXae$Oqz6*eL z_`8%@qUQTElpHj`){?vOiJl+MAP|V{eo-n;soTCm*ccdJA2hW>Vo!*?Pu41zP+1`v z8A@>lK5qp{|ZsXwr3lb`m>d)s?^>P9w%X z^z9qTJA!{?3vpX4sbmZz+78r0plKa*iLg9$N&3AG58RgRW@c^h$NGE|tz5Cwx zK%yOWjLs|QZIzUNmIO0u@N_%eoZ80xVpQ+$RF)p9x)TM$!@p^c*Ep{LjR-2oxPSBV;OM08bNw+5?>LH};3P3HZodBiXi6LwqJ=4r@1Qqu|Bh{>{3OF5-HN? 
zn7@7#WO}a%dNcJEcQhy8!Gh!UcQi%N$y@*W%X@>&0*dQ0+?=G~5GZM$P2axGN&1ep z4Mv}Y4aXhDLf#;{Z-3(o99NpG*1&xGcdV+|-jAHb+uIogtxx=z-o?Gt|l{b_L%q~*)U;K#2Xk^0}QS=czcgDX{`N)(OM z51qcwsr^`YRGig?YS0$QWdJsE`jdx z`kG?Toh{*8l++(uh{y+0&wC5uVEpt&;{EfsNmI|?t9C4 z`RRB~kl}2>koGv!nqm$kE6EFS+`L;7Pj@1Lg{mK|nu2$=Cl$=NFsmHS)^k%MV{509 zE9{cH#J#6ZP>u`G!mVe{4hF|@@ET&d_3XV7uADTB6t9G#40xScCrU#p${%4!$_%TBf<+#vsevSR1iFdYkvO^9i4W{Mr~P*nn&eB(^tp`BnMq1uO1GYPx^m|^X6?taj|sUC z^bX=GEg8FyH5jgdjofS53zOGU;I`;Kbeh|)5=dx#Tp`CCr~e9~_dsN&l&#!qFx|vj z(J(r(5%J*K9g{#pn-{X{Wv~$Q(WTGWwQW}|Eup)eM?2&TM&9lHkOeRCkl#~} zvzZ4!e_I_cN7R1$_M~)|Il^jvtbQ(ww$x*9Rq?L(Rv>CSpx5K%2WfdY?O36mY5P?m z_*ef5{;kHdef`IeTYi^Y8NVF;@eQ&x0oG?eq*>PA5AhEYo_N;%qfie@^H{u6*q*KM zyvHs)@}l^=J$_fh!*+4Un7Amirv;uH;A~TC*DP0Z-og6lbPm@WYDoqcNJ#km%ZVfw zV|P%xkY;e0dg220R&wlG^CZ8aca}f-p2*sTPT9 zOW`?-w%Ug8ubhbEGP&}aoW6)m7lm)}Bi1eWSVHLRt;fsY;{|(cxQcE;*Zylw@wg*q z4i4L^!=*TcRKIey@`Rn2M_op28c)VHboO$!3P0;J8=EO8330R=?JcFt3)SYZhz#S$ zN(;$X&pu%Bh>top-|d?ngD%I9qY)lGO^Eu(dW7inFwHY_E#C?(XYz zHHgG9#r>KysJ4n~0=5IHR}bZ!BRM{>-rt%pZ2X;Pp$(BOj``Wo75$V)dp#gWqa`F# zqs+UOX^4y<_;FjnM%@l!^}CtPZ)4%22H$6_j7 zqmEvt;eMacuJc?Juwlvfy`6xHzCfKFw-0{>k9E zE-f5^gM2{w*Ibj`d1%cV(?TZ=lO`$xmp7H2l@ z#wbM~%dp{=+4>-xwqJIeHkL;Oko?*X=%y@uIW70^3Aa~%gjE&VCf zW|Sy=q^QX)5xedNv+{1Ih*qMyO=;{g$kBQVFzsZO-%Oen=>h{SAoTgmt_=M4jB`mBfJg&JmrwCV|XENv|LZ-vp$$+VOu#W6W z@!GevM^@M(%*@wD%9$@#ewLF`F1*`Hvy?3nb`YuO5lZ561bzjN+%}`Y5(CJT^YXy@ z{)in1uxkSFc0IOmiPXss&0$Ep!Ger(TBIWnenUL6$9g((WskcALbNTmwEc8;e!|o! zr$5;}tW@T|z3AKbv-d42vZ*oLUU|IdmGTa{A&+Z;WV{O1DUaVc{QS`4V7WrV%OkFv z`p{Rg28*5P0fzT8k*oE$#a?FHBC3y46;zI<@q41I`r!@x%gE`Y9d`;(H;e@*Srgmy zrNiUB6j6k;JU8dke|zP7(<^Q}AWJ#}iZQ*9B=xMPsmz{5655uMCOMH(G( zNw%X!dRr=XSpOAiyzN%vsb@S%qIO>S89Rjc1$Ip4TLM+7${k@di3{NuZEG{O9lgKu zqmc)9GUXc;pLtB5Ul|YNc(-A`m#c9969*G68;tW+;Uv#=F%>pQev#nHaeR)FxKC$ojBuW zV0ffU4G(%n8Q@v{4bt`3^}G&`YDyo{oPO0<{n|b9hSzgq2dDMXjNS4fwZ)K|%ULY9 zzy8O%CAnuvpyQPa~~SR-)V<471FUn=b7R z+@YlZ6MABhP*WRrk}+@I{m)9`pHxDlkZ$@c5gIkPB)#cp_7*;(X%!^LAUKx4tutS} z>t%^8A#+H!o0+p=x+RInxA=Fj-W9;sg(HQ9+};#K**;%d7h2$f<#7c-K!iv_1E;*` zP`aK%Ujz>#%f$WHOR2DrM|6DdJ33?3ULOV+wqBy>@R`GW-$lc0m$qn9+pd3s(feBx z{@n*U3xr6xU9VT)#(U*NC$@vs-*VUAf?$8j3}JDrd2V$y?A3Mm5Udvnd6PR=|L)NX zUGr^UrW!T}nSynG9vg?I^Je|9{0T>gp&^TYl9bfr##}F1D46OgM(b&smW+A%1q_J=re&2Wa?rzxwz&+NeqZ?-+)_|{^)?l!7&9a1PA@{NhN626ViYGI z7wPnDxHYeam7pb4SzUyX`ZDW5Xhg`F)jN`Y@vC7D&KOAWBV@N!f*C$l7=|2hhvup0 zL?sRFiUgaYx0&!iGY&n1A0g+GwiLX`84QX{1J=XDn={kcOi4W0y8_k?|4g?Nw*poo zQ$kziyBbZUY4=n4&;ooV;L6`Kp4T_`bpMjzH2P4h^0CBQ8I7J70gH|gc+u$?LIVrV z_O26i7&2!3O2}hTjz*tZJ^nM+-#JXd5nfa2&wbGc3B*KRxCm!(y?ba57DJWHN^M86%sh821ts71%O3PfiT)*6R)#j0N{hn$b7=XiExIZd`oA&P>hv%RvVf9N!)--evU zt2f-hv8Sof3~3-{dYnwoe#55_pM!bGwE{|<9_xUmb)-QUs3hctcYq$f?O&spz|K#R zOwAX!)`R=``yl|)9!RH%cuE;C=gtgllqB@QJY zpDJ>Hrr2Hh8b<9sUi=~1L3%=bHz5uW2~7}6UUhsC*DcNKW*Ms(8!U=l5RyiR9{D}i z-*uP3Y$f3I$O#}&=_%uRR;e&+@v5BhQq`JbC|;CybsCV3!OypefR2BKIO0*+njP3S zF)TF)u8mu69-3wi<;U{HQ5isrjVEw8v@>6pJa-^-TTfI=DrqYR6$vJPA=h<-@@x0W z|D%Vuqq+DV+C*{saN8XZyS}t*`gsUn)9V*_tArdylq;{lr(f|55WZ9T4V{;ptN6m$ z!|_rjXbbe_dV^su@b(8^^aZTCUe+s1NO!~7dyu>L;=r1+vH!g$c7Zo`f>G)CsU*Bg zY0cVh@2rsNPrTnkW@#+np%uK7$^-djPjW8}H*b}*6t&W|*gRG_ao&C+CPSk6?tDg! 
ziZH8r?2A!nQ7O>H?-+k4Z_1@nU7HlDq5VkgsjXh$*>_vA{0XnxQRTT&{dr)G99)gp z;xh@`tpCxqz*lRBOC3Y%MbE;R%TaFrX?1NM?$`h(g5V#q;Vf-A zI~elW?Q=_e+)X9A=zFs6^;u<&G>_Er%g1j8F`kq{nFhlpj&e8RMh2QAzjMbe6~nws z?XiKSc>hFUjGv238#Ma)esjC;h_g8G9 z3{;Lk8E1^J!|T_77jTW=bnBw$iViLMZdEA0-Kf>hNT|iQ&EWKR3*`UYlKT7TYpTWNRB@e3L zoE1UgdXF1>QCTxb*hst_X?cL&%=>L51nIGr#gC3I8be zMCauA&sxlZYjtpS6!@TqWA^aUKh>p5`$Q36mtE~gxcq&IXU-_5Jp8%E&xd1#Yfak7 zq_~H=>m1@@PpKn@FHO=;FrJS2uSxObAel@$5CS&qAQ>~i++|Fh=-9nlL;c&o5KdWL z6E~`T-2TA%!KHM9AVA-}E|3-S3^C<<;>1jX2mm1#1+WlPD_2X7j&`59q;T>9UtP@* ztW-@g*cXqMC=hJWkK%Ch-FOwz;56YA)$8<>E4>^o>Z8w3{q>I@Zz}_mwDZ-C%pQO2vQ+ke(afk*9b+gwShML$CD}a@5V!FZB&_zk5ho?Z?|6SHpk0 z#iSm8A1q{ZCb(KZ85N{Px_eAE6Mfvirm8~oiIAS`vhoN@pT>-h5PKC1`bbMM^!bcJ zf-Kn{k@bzfVpNEe4U5m{TaJ^$WRN)1DU@(UzRuV(WKka^euwoZmT-<}s%t7%Lce_n z{q_WeD2aPFPn0Z$arc*lb$vl-L(5xgz8H9#K)6;d5%Hy6qU^utj|+}h-Ynr75b(}Q z==0|B+lCLQAqlqhE+d*5%E5dRX1QL0*Z&Nn7SVJCOANhO;03yv7&)U4t!J(q;w2aM zm4%eeB$`_ny|7u>x_NFWcqw=R^ztTHlz(5)?*F?Yiln{5-1 zw{p3UYseH%W(K;Y3I2?G6O1b&&v9|sDzR;W9R%g`g(WCm*EKitevH~4sD8htaAuz6 zEiHXC7uIuYHJ-TCnI1Pg*}x&4CP=#$SL)9cNWc_=(?N~@bPK2~6o?mz9=v@~0Q-mn z93PEvsho}2gG=|6d34X;5*M8Tm01ln{C5V#M8*XGgssQNjt{(o4|jhVb1!7C1#n(& z=mX01`_!KpuzZ||uP=a#IXZEpdx_}mp1B*7^2v!5xi4d3ESOV4K7FZKD4mEVr)z(6 zF{X5`Xd&X(;tqMmbxHF2+O_uBhKDF-BISJPg`Of2*bLl{! zC`mjNQxn9|M?6q+5nV^dWkJ?O{aL@zJ^v-rBIrBEuMl5-pv0J$2A=?v4QlNBzYFt} zP7zJJL^{k(LjP5q`gWBK+W3Dy%o)+d$!xAS|EI*x{&IgngM~<9Z-BJjN_*Kk`_<%| z26v(Sk$U|FFOQ1lkeE-xd&`66c}oq{LXX4mEEnM)^ck3(!iN`xG6Z8+qfV4={>9?COn$Hua9kerHBH^-7r_G(Tt)hbT5dEV+=C z-C6%ts&K!d9`D=K58(@2HHDe=AiG@SKvfOy4?Um};fRXm6)+z1EP73AahkI#X#B4N&X2pu zx)=}MGc!F;_tjBl(-xLJ>f#bq1Z?Am;kdVxbS1Db4ZnH0_T)W(9^G&*FT!T)#CUFr zpekWomNvIU_&c}VqBB`FTYm1L^D%h$zOO@rPGu*%7CT2CtJ{;`k|YNGwlG8892kN%+UKrpRS)d=IFlDXV-E!4KgbP}4T3Rlr$VLIXQ=}sFB!exntZku`de|)kIek5 zeGybh-Zkbl$rj-*C{G?3RT|^xUdGu|zO1()&nrPV21yQFC@S!SIaE1)Qy^^>)jCwa z(if5p6UI5nyKZIwry3J;1Q1D(2s58B@!#0K? z)7X#xF~@G>j;H!Jbb)~32A4ril!9f8^|sazKN+hEJZAddJ!_3{$xY{oTMvQHYkMN0 zBQ+`tKI%M69;n?0&hG_sH2J^ASx>AaL!6X#=6$|>>oMCcGoMi1x^s-0 z>w4h+OCk1p96U!8f0EL%c50175W)SWTbbMO**AV{mne%N2D?aWgoCj|IXxHO5c`%v z5?a1qn<7BT{GPLh!#gyB&a9H8190%E?|}=mvuz>4xCjFoUe7STghv54`l!{U%mi|n z^QAfZt@<^hG))ljIZPaoG-KKRADR05H)Q5Psrt-;N}tu5=x|vguC{HLL6C53z4oI6 zMSLMtYvxE|64Hr}{pv%#Y6hB~tZ4edTw-ERqzl+696-$F9cYVyCuZ(4accNXkkQV4GKqHIez7>I=FRNNcVtBNM);2v7eVoyi3C3mr_WCB>$6;1Z4+|^_(6^7D62cN zX_o{PrhV9_Q<1b3ddyR+>`CmN*K{kOPb)!XS-mkBzT~n9iL2(hc&Vh8UG_aM!VlD* zrK@xa#`KwaV!aIOu!CX2t_t4yc2(+9yV;GF-Z%h2YBoiRf+TaK!bV{OzzwkGPUHbs zRP)Qh-1>|2lc1oWvy%fBFDm|*19Ql@VF;**J{B(fqF4U1FI86J;uP=>d{oGVofaiy z(^6AuJU69CP~eRPa-I1ufr(Lo5bE8bq*UN?T^wuHHZS&97ZX z;{4E*L^&5qJ#0zKNqQ(F`Tta{d5D{SU#ukqR&vo^LuwMs<|N;QYsNH9`XhLu_?>+? zvHwO;AGK1Mshjo(^}tc3hoWwG-EyK5$jO;~gk5&CKbL7LtO)DRYvVu9Zh%x|HxZd; zIlR<0Qo?Ok-0S7i@V*;Di(^I3GKCupWn zZ1taPG^PkoOIR|(iMUG2pyOrI6LGR8!>@TkJPOKC%>SJ^z*Da$W_hl(%os==X{a<6_q6LN#2VZ5XFJS! 
zT$~WBV5xjt4n}a;%zQn?XLYtTM})CfeT3vrwtgGa$^7-887M)406G6$`JwD*g5KWf zj$;q;qD`B57rYf_bckW3$J}*A{rj(9zW@uU!g_c+kk&h5Wzn|PO3kYR8cD_o=hWS< zY&^38grro6?ovFj>GD91qf zrk)|DM^xp>YxUm6M%c?7MIp&rm=iv*wiyJ=WU{t9YceeuWTplpDf%0)QI6*E+BPx5JkV1u5l8(&T&v3~UY0F|`5?oNbL~OiYeiCIXa`tA6P+ z=S4*+&XWA(R8lVP%p2CanIn@ysKmlUA|&s^ATsPw0Tp}4sYfU&qbWtBePcvj2V}L{ zj_YAgPs;8;5x!fX&s-?q_1mWQR00$B=ruXFX28PAkrTj3LgL_T5Fa5SAu;#eY+%~J zG-asArgV_<=U6a=*U5f{DS2XE*~j29G0EfWbF%P<3UjGEI`grQdAS^iKfch#4CLO3 zaPf@h4r#_cGrvdb{g#BS^OLD1VwN>cjsQk%!WP8pnfjEhgPS$9mC3GtP}-Gx%r~YX z=9r3`D_R!OM;hotXRT`dgU*Vokw)j$)hw8C(r0uy;378+sV1K5Jd|8w6b$*Y!@OA} zM9watH9OCG86OA3NaVt4@^wqizXuTXBpvmreSnJ}FQ&DIQIk5q&Sz4BFTyQdTj~H$ zX?S?pv&Hjdf9*M23U7F5=p`4;bJ1TXSJH^}faz6HC*9Ge)$o7LqZQ(FJ{L9ZgYPQ4 zlu$+Ib(Y=Ekm9z5&_8#;r+#L8v0`Ia;o}IZhV_}SRk3n?w>q`{b3tcTnzv+`bO77E z{u6?eXrCrR^R5&YoRs31CHyYQKIS_lHs0PW#WSwPaI+S{cS0-lWdGZ^B{a4pGqXStgRDPwW#ujyQ ze3;gm-#29|IrXZ`RRT^vn4w#L>xe(2fH?5MH|HT^0r%y_UDSzF{(#@C*7^%ibz^5% zBVV#iAwhj+`cr9@gc@8RJxVvZqI(jfUqso?lk1odm%wm{XrN68=O-HzuV+4(1ik0NGx%*e!~c4X)}Hdt&b=lv-&Sy7l% zU8NU4QM0>FUys?Ye7pM)o$rvfLQGN2?b{Y^DAmsX7ss2TBa6d}OpdM=>-721Uw2e} z)Tay&#^xYt)qGZoqr|f4i2DH($OJMmkurj47sm7+j?jKJp0v8z`}q;?k+v4MZb>?@ z-|6A1;zzT~gLy}BzD~$aq^e7YDr{qS8D&J97e^0WA zTyVQ8tng`m!fSJpkWXV;iEwzkX|(p|5BbK8=YZxpd&Po=bEhLs5S}xN(J&<+#r@v+ zN9*J6+ocO_mYIRUUhe(m(UA%z z{o|r?>mdi%`9>d9U7JoF%j5O+JZjVcX6y}sO(?bL&gV2qU?KDmD{eoMZtyZ{5NOMq zv=Zhu-+}PV< z8`a4?EuyQxx)o5{IF$A7U#Krw&%f%OWg*mZm5u$UFk!+-t7RoF)4#RPt7Pf-yITLr z^vD{Vb2o{?7WBl{)En|;jB-Uy)kQs`@?^=pUsoNL-6v-sJ?=pA3QVF~}h?z-_4(K?d?Tb|K|3%^d^Q7`zUli9FFbxb`Ws;8!WggQZ+ zg?LlMCqYoDcs~K^&qck3+n&`ke1-&ErglF+cGrubq*fyJ!oQ3}f6*?&uNJ{`mh`a}fT_8e~${oR>r`CzAdMW&RTdYSM7AJ)t>fcpvGlA@}TH!0GW)Q;9$Q z)MIJZ%3zRx0eT;R=Z23sX-&;2)k0EjkC0qLF*W7`vyt2>aSFu?v)t9S#2g|y*z6#I z2)HGy$DH`&-Tug&L-4wvw^((U$1lXFj(NALg~aJ6intZRh0xKGB4};hr_~?9avuS) zM0H~w9Hk7L#29#h<3hCTV4o3pH_c~oDoOrB>Po%Ku0VU zRT+0t(&y?!Z{)iqn6M~+vOS>2?xc(sguf$p4H*#|YeOMY>8Mb^BUZYi6A)p13l8r&;53y^%<1X~YUEl`NXnovjfpDeZ$&^&R-gl2fwzEjEvm{Id>G_kBr|wUw;A&m`z1aJSlP8o}~&L#dBLQ08wn zdX@(q0&5M-f%W4oB#AmKl1af;Dgy;rc`QD{@CWN@bZ0)ty!soF%}PC+?Fl-a2J`66 z)~s@No>nh-{OIMGi_$Q}k`&H;E^XV91*#=dsxH4{DOyj?uA}1%0XW;U9u!}s=5qwU zCTDc+QhyGklru7>5jk6Mw6zp$`>;|#p~wfu_+nV1|X>Ly$LBizI1z21YzYKYs>uPD>%6mQd6WY>u9CW97Z#sKXc78 z>2zhPZ|4s#HM7jt^6AI^o^KCs)~HhfYHZ6>tL;r8EtG_ZqrCvJ#-Ea{<5!W`v}XdY^p_vt}(pv+@Z zkk_A9K%aNiNR=GrLybR;(6x?#5gU_OeVnq-?o(J|{aw9YI+FNrU0(bx0Mll`nHF-E ziE0_18e}?z9|1;`!X&rh>C<)DD}~agpD4^sQ`u;OA|z;Xi=fVuB+toB5TjRvGLsa> z>MYkD@zVg@tXt|anRSkznB0}=;iNyLS0YZ0IEjFP4`ffcy5!nY+F;8RPQML5u4>`Z)Sk;jZ?1UDCVzAL0?o3{XnhI%Irw!4AG@|{NVeZb1RwZc#veQ!hs+(~~Rn?P>{e z{|E6hJmdShp@BTOgY`eJpd)@kxZCL{o$R?fAq)FkRl@!-}b`cb0+N>+yqrlz?pVbPDf5 zJ0#hUV};u7Ci>KSIHNVfr^9F?Da^bWdrYY{xBa&P$&Avme{l*g1 z=Oq7M+a&#yXo^L1G4%QI@#m5YWn&2_@sD%b1D?$11wuNDTN{r~#SFv9%;VM4VNutL zua*qjr*bzj=P+B~aXrd;LWN}1a4Oz9kgBplZ#IuEPg%7o^%GAqm ze-D=<-?M2lf&z~^^e<`>M;+-?IO-dj;{7EtgX%9#(sUJA0a`*ho4DbzuAx{+`;;zs zv`ER_YXZ&G?QD|c&B0P;mQYQy0zUR1zX;Iby7befk6&(+U$PjlC$VGtz za;yy1zB2*rF10TMmn#!O_^Xj)Z#dXE1 z6V~0NAHa~ET@+osX@Su%;9MG+fEW!cz3~O+u$kBslB7{VbLEfMa#G>$SZEbOfrP-o zn@ewA^u_b%siTo&V-kwhuQYHtM|iKGi=Y+P55Hwn#vd8hbXLvRJ|_zgsD_x1p<`5L@=13%f>vgk+z~Kmvj1PW!@zp@&XaLg9JrzB)EHg0a92;h!m&uoO zlYi3zdKvOgK!V6UOr82-_!7KjRx5H86UjhnQ}d((ldZD|L_M;vY*nkjKPLRiABg8m zaP*Oe&u@bFNURvC`#Tm%$HMPN{>PSSP$5=}%AnQn?5oD~E1~7#SYV)|U_@Y$DI;bv zbh`8dgA#{zQ=H0n(AIkUy?sl%e4|u{gcT|8@_|!!j0^Y-aLs4Z| zY@Hl9{{vJ20@D|-q-J!V6)IDiT!S+VHcvh}+Z}RP{?9}K=Wn``2mwzOV!ZHO<3Lde zo?e*?U{EL?$@H$v4Qb$(1u0lL9|u6{Qs4Yf5M2xlmU&MNTIR-7-99U+ql> z3^Hy0DdI`zCC>N>Ha}F2B{-l zMS)^#Ri}Ob=$HC;JgAIzo 
zN%`Q#%^rMb%=_4XSbsAgVCLrLpbDf}b!Xy>Y2X*9ZhlHxOfe9jj=LcT>oI}5tt;z% z{+5gxgYYkF4wUEJuHMm}O@7d=n3Xm(lw!unz8HH|TsL!DGm~DR0~^eUQInGt44L$o zgSgcR+1XAZ&}{Vf+6tvpa>{UgT)5E!5x6BPqwyj`70%YXlbH&;iiBfd2U%#m9Ux_4 zP5z7K$isyq2<{(NNDTErlJ!h7(lao?@-;a%Zap{V(#TnzEX2G5rhB6jFg-9|{bjL* zPQi5<9lZ91rXq=tRGqX%wQ`_JdrA@tkCVBgX@3=bA&>M~7Ukt7hU4BgDTx2Xm9?KV+QvdwQ93`NK6C zY`^kZH#;G2*1a5r;O`)oaJKIaZIp_b`8wq03bAz?Izkdzt{4-`O6r(cZ+4t$Pj%j-;fdOa4(q zoxm?iV;+rD(?s2-AqSEbV+86L6hO*?8y1Znj4$WHR0U{T%ox7^+Gk1RXuJ=zo_%ZG zTP;Daw(%UK@kcYntmhh@gkH8hGL*IIpiL;QJ=PS@)-=vB)djsB_oUFwLd@%+(dvI# zs^j|1_3`3fH6BQ_=n&RJI=SN1Ijuyu{s_ANQRE=zXF=*m9N-;SMv{y9aDxr!Z!kcr}nNGw(TY1LSYp^i4I;I zF28B?S3pP$;O*K_y>SV%AWyN332pUlf_bSeJ+!r?LkklIpcVfQ!59oX=izEFJx;iG zQ?;w?k=tbCU8K`P?P7#fwFK@TG!DJXWPj%cB2%Sb?PP=aWF3CPuwP`(#DK+hL>%wp zrfB595tb$F!mQ_j4q={g@d*nbx7@Xx?V$tv^ZKuDLl1SC6=;;wy$zX9QqR*FjnZ|@ zeRXp8_HbN+aq5snePyc|vuKwfd8s&8DE{i&{Zi7LdM)1kyV`8wH{GEMMC&YR7M|y@Jq~zl=J6nX08>H}_ zPzQfb5g>XyoJXAvenw$jMy;1Zx*1w*+8U+keK6*_{MIwLtaEmDHh|#n?|%{Ct_(qp zJ=XY5EPZ9WpE#U21lf8kl4v&E((u(M)m1l10<|dR#D`i@37$X4{YueXR&}b&W8k5a zs#~vFDQG-*R~iC)xQzq_z}{3}Y@A%=8*QD{+aiD^9zUtaEJW!M?=7MdDG;<0Q9AB( zadxUFk;9dk@xov?PbZU4)Z8#^ts_Tf;_FxR#Lxfi+0}lTk4`mKdU~0gN^fsyxoS)T zn9dQm0ES0rx`ryWy>Yz;2Iv_5jbL)lO9}AaTe6QI!=2{U(HQJ$(WQpoO6r~+rcnFE zPmYW^i;|+}t3q>(ZX!E5%z`+ifUEHmcG+1j{YQ@70LC%$UUgOoBSH zo2nUK3W8$4fhB|p!P{?8LKr8td4(4s)vZ5%{Gi`BSL^Uk6i%zuAVC*! zJT5ng^hrPBZC*i*B3G0oJ(@G#)%*Z?gUtG!HM|7&TlY(%N9S4x*Hj0Smry3UEpb!_ zt%tY$KXyA^3xbk@p_D+WS@5XAYN#=My$RBO`l1hYMD$R-0CSK_fZ zbaS^fP{?Te6f}eECYs`74m^Y5rmOEBcIzB+#N%Rm>qza-sEpl7LOUHCh1n^I z@81s@`-pLH-~-p!*{Y15h2!Qo$U>`kI}TE}bfI^PM6JK?i<^>r<;07liW(Nh+eDlw zCf=*6;w0KHf6sBQb-;Fa_QpY+gL5Xu@>b$>is%#zd<@$1&&OIJ#6~UAs{EiycQfw8 zQKL#u$|oK*pi7(IaA>nz=|Oykd7pLx&6j%&Sj3l&xSXh)9dSH`G+}t46@k)u0TZM1 z`yYDRp(*0H+E7N}ig6q+@`iJ2qk)U-7&BQ<#roc|haa(2P}an2ZLwRr#dZ_o&KKzS zxY0~6%R@`VOE^Xh2IypX!?{zqr+4q|-Ac_uFvORL4FMdC+aZSNwWGiZpu0(K!iPVt z;k&ibpE^teg)A|;MD6L!5 zIawdo52!G{TF;_)#2gr8V7KUcT`};EN^g(hrFOw5sM8u}-G1)X7^{yftd)K+I?N*F;>V zuz96r{72Ta&nFq&kH`T`_XJSr9VeYKKY`l@)(uJFme0OIqyRZvrV8{XvtsFFs=as0 zEG)o9J^<}}_hd|v)#dYB+)-)Chd)@JAs&BD=esIf z!_0~L`UW&P%j%e~_blAJ4g^a)IyqVF)#O*uQ0M%s$ONB)+Zq7v)edu+swv*BjRTa2BB0D5s(0*D5a1Ruy-@$0aJzoRV6!2fU&jZk*-UIfhU$TYGF$w>( zs6U{>5+7h8)GuWwzT1ybffy(~53U*)&F zL6MUVkCSwY%ET5i;kly>xY)K`ED(qTK$YrZ{5Qbe0kpzSFAxyeN1yYf!p3vG2hzZ_ z#;|KCR*iwA3qW0C0K%~wsrYb<|3&XJG}^pfWb*yv>**i{xv=*nN@PgJpmeppjfsnt zi&K^}xAoC#z}?ZnwZS@5@Js*vH1Da+`RUPsASbELbVN6q)7oz>Z-+!bnD|lWPX5GN z%a(hd>v}_B$DmJ*d2cqZL-H*f;n0twN-~AM-`lUt@{i#wsLLrQxM>{eFY~{iLLTnb zJhKl)knBC_(!bKffdKqn?VaH_vC}h%P=*+ii7y@Ej(8T$AA=x#`xTU3~AtBrv zG1n1m)t}xTXY5T$O|49GWSr6=&{SbFBEH-EsMd8uJcTD&6LB89a?_$a>GNPt*Ec_$ zQcmGdI?RJvy;9?G$?U!M&^P``^u#WRafE*4M8ebqR zKDzA>TPXAzdu}zy8M!ab#TjP`>;U2gfDd(d=DYOEUTlGDMH=>g>I2S7rfZ2|tqZt( zV|5@$BaYip-W}Y_pb|Whr(G!Sb-cSWwDFxtf=ck!z52VfUc1C>C+`8c7e*!IzPre4 z+7gPswc~IL5MPqOjUM0&S*j`G0IUS)t*oqUSF%Xlep{W}&WiR@BSXLIr! 
zxV8f2qXe2i-NB$BtOj?l^k+?j=EV##S&=zFuV{QV)f5g$B`8&ix!Kv%Z6r?|kMa3h z<-|ZwMkN`d+YH1l$5MXRimilo%u~M6B9}>P=@Fe9{&9)i#us+&rC%6wBW1XCYh|~j z)jKh1!xcE#^AI1ITK>ukC&>~?L zh8cl1%VYDz{f)g@iHY@18h-h%j{L0^xXlDLzoq%y#hHkWN|&&z3gU#RomGoDGGnnf zU%>T&%dx?o_s2YKlyulfbWz3LeE7JJCnPO{lznQn{7Q1?2?#kDiCG@e ztJ3S!ThKew`_PBf%!G;9)~;3Hk+Yx9@%YfS(xd1n=vU|u=!fWt7zApLm-EmJ?DL+B zQzG3v)d`}tlI>erRt43c`>|#9=5q(ezPBjnyarTI@jEc>KtIt!BR>inpVafz^sXj+ zio)U1`iaREWQ@&@Z6*0#7uLM@pMDN(kxP0he+bjyjK<5;^!Q|P@%bz;Jm>fTC+}&Y z+DiMeazf?t>!#g#yUN;Q)uAU@K}T@i{Qw8oG5unru19j*kR5A3w*C6jor9%nMXox5u=(4uUQVX7~O~J=h;o1T!5+Wj{C|$asG$^5TBMmAnE#0XI5)x92OA68mNJ&aavj|cmNQlxc zAq{sH%+L4Uf8hFy`+Bkaj&tVBnKSb|&+GzvEj2ZDexS?&$m@I#bNv-=e1VqWHq+D7 z$*-GTBK8Ne@b0x0P5adS223pn zTp%qi4cuY7$CU}0I0Uj81b}$0zSe$++k*21)ufjIcj2)f0l)eLT)KYaGw@IC9vp&O z3pKElufQekbbDiMqZLn{kIBw;~ZA{8<3*etyHcEi{rGz9ii=`ES40iE${QW#~^+oI3f>c)pYUYcS+l~~%Dd~OwJ94`Gq$&eSk z5^uV8Yx;+>IO{SMr*OYe((UE1UhQFq3egTjs@^-vJ&$k66sKB8tU90ejk!LKQDH1a zwgzawn+{c1+|25;qq!){@=RNBRu+}cuJ|!|I52*_L@)`Pn)YHqp(Jhzr_i`I$|^OY zfrB|C?N7wxH5iqrOv=p@UXYx}mYMX*ieFiK(l*eBn*W%S-FTqlpwljk(h$2~T?7(6 zLSfrtZ-LhJ*uF+NW=uMjia|EoiR1XY9!Wu_UY5R0(d9ilxwu59ZV=WJmFx7(F`e)5 z5!|v=gQ;h__!QjFUJVSFN@L1A24~xYwcodjS}=J?jq6ZbzYEoVhG$$SJHKlBDM=lI z)92@jQ(Yn`^io8vm^c+mS8f;3X{K5+dXI9DJ zgy3=`QfGQn+J+_nJs;)H{O*>V=+2T=SL{JgubmS&cuAP==vR)YP_QU9DTQ)Ztjp4e zidT3klr)rs#{g%rK1q81y=K(qJNM#WsvL9WM-{L;E4*`4$>@HylvFb?dS-TLx_9~g z^%%|wi^CxQp}VB9ae6E2VrF!< zmDrQGTZJu>OdsPiPjy(cI@vi%r8nf9t!<<|WJsw>W5yb$(@z@vf_!kxU#9wX@H9Aenrrl~##i4So$AAlhKm{EtfSCSZx zXvu4t&SobL+07T3YCTZ>qEVD?eH4^aA`1I84eq-eQ+Jy-u zH}8#!S?cWVKg1EuJsh@LYRdw0C=PuUh4-Cei~^@b+hP?ewq%r211fF5YtCWRCB}x+sKm}(LrirYAG5uR5}=P%*Y2?H7CO~&n5C?QAlx`j>b9+HVJjq(5GMpLYN^u zW>po(csSF>b45ne=Fb3F6+-G`_!Nz)UFvwOZE?)8izSQT0CoT)wLA`yB{`?ji+C=s zX2kARB;Hm#prGcvw-53+3M2l@C>qu!*9s$iYQdO*XQHnm$P#XiVGVQ51=+iZG5L;)9Or+opvbG-cB`LyJI^r&ahU#E^^JZE?XScGYTtwwW{Wt1Q+p%-9yhh zq6QHxN>8@(WMg?PD%Mj<4?GM7=y^ycao#%<1`iqBGZuUYc@4)3~uyl^u#Ywq+HR%e=g@k)JIzZIK4 z0fE%4>&?>^i?ISwoK#y-{!uociP*%9mbO_fIN7v^Q#`&cN3c5DSa~2E2V;ADlh)(d zGWD6GFR&_EBKC|R1Qs$(7r8!HHm|j|Q;@dM+tTEbbH~nO!~QzQz+h@ zs5uX6CGn3smeshN+1hwJc=$Y*X^C}>><%$%GGHe12obu<1vo&V=fobz5bKp9o2yfj zImII&Ha1>LDZ~<>UB8#SYI*D&?d~;W@4q!^|D5?P$D$((A@hb?eT+}nTTR0|u9)v1 zn3QP-rNwA?I9Rz(U``_7JPn7TVJTLUUk7$>%<3_9lxoZv3RuOZDNVrc9WkEvaaLrq!#s(6G@Phc7KDu0dWIWN^_lC=_rf&DP`gSaA0+uXHSp4YagkN~IpsKa*PsdChI zkq_k$K<?9Z^`1WR@@>NLdr$Syg;Y5_Hub4Xq2m?$gb1}WO4nZ;Ie62^Uh*6*@o9;R2L`U=un>#y+IcN2Z+=DqD z=RZJ*E!@9pp^rCNSgqWR9h(Z16HK2YBEW+>$w@v8O9z%Vee*RG^+PE2H)ywr>^YtUmZ=0vNlj!32@$+tf}IA0BBtf3ptOq9VX3kCctD zC0uz90r|y1YLV{T(U}~ZZJST^@=UV zb~2JsT;KBru$Gt&tgDUHwUt;N7fNhfe!k1-S$7ge&o{eNgK%0RV^io?$iD0M7{9bY zI$@KL&Hb#R5yM*oK7|}QlPL>dq}OX?(2hRKX}6CvJUFKl)t6Aq<~>e{I{{=IYx%|4 zTxrgMZ$w6YQ zs*-bje;hVP+dJ_QqQJ=Vw`fV-pD8tsBAo2pEhx`_r+Zh8P5xdA?(Ti-!sX#=_&zFm zj^r-?$kneH%ICN0KF`TNeR!`5fwaYlUKjy1f$NDpY3C)W2qv+iH{ev|SMjb;siib2 z7th)k;+#9iqJ-s*1_6%Tj&Yih3Ua{e<9-{5wr>*L_aF2#0m9RvaKidH3(WPtN`a8rglJx>eAd!0#_wu7Pq{kqoXtD&fUG< znYufk*Jt&O)%`fhk)wU2!D(adgacOWHt3)M=r|AzAjl%K{piV@4zP3*9mnX8lY>5#BTEqNVwhVO z@&``7EijLA0+zC_6LthM&>T94waLANm172+NbU?=tpcImWA_cw(jnP?wkgM}5Ujv8 z)U`NPR6fp|@n!Yo%EpEUelT7hMro0WzxC14%G_*Kk+nT##$JS{N{bdV#@1aYxB}HEiD*8soJwf5nHaT5Pjdl_iBw?8 zzgP2wGY}$H$GcOjz#Ln8Oon6Ua&}H?_<=2Zd zrG3@^H13<|Esodw*Md`B0)K9Pe*TGhvb&t%=3uiANH(R3RZoN$gPrcOT+-o}uiSAy zvz@^dgx6`Y(lU-C>>|!2Y{{OEmmZH&{semhk4Mvv3D*HHx&4cB2nxnk+Ni!#Q*dlM z@DlruIZ%JczN6nEOaa6IgspjLJ~<^&8~>iFQFg#y(p<}aDB#kj&Vvp$36sT~v%@n^ z-?1~82tq?PRIpJs|V|zn;05l3w=QNqYCL(ElNMx>kv`hDKCr%N59u&XtT)ZFoBFtdA!T z9FjhcGR9)tp8UQ+EHY1hYOucgPO4)PclVqR6xVXD8_#rHV}1Oj`LuQi=|z9ydr0){ 
zAPT3ixT&0F_>hgn^B69xOU7o={dtMH&!olN?kKX*wL{;VKtWzt>ijwcW0z;o-UT%p zEcgmR@M7;>fVw4mYt0C)474@(B342aBkxy|^-}3V`-Aq6Z-yH|VqW9jlrgrGk z?5|e>A+B%0_tF%s9JP!XV$zb9N9(7hz@26{mU)El0~RlA;i#qTP8^q7KP)-NVAApE zwfR74^YAm97_h|DM-!+LrzL~IPvD89T#lU!G)Hse5@2;Bz=mOdLpkvHhQEQIURUPM z8UzEWxS*(Jkk>u0(u{A?ldfHYv_xtJk)=IEZaj8ZPN^9IO%m4AsPU!X$Yup}{mayQ zC)bN)Fo9|sX0b0tGOc46 zCi7&6LyR?h1XLFI1n!Fgy@_k`)2Und3tDhU#K4Z+Aki~u$v@q$GOC3Z&|!!gG4G7lat-N8h? zRqvTMk-s{TRkE7F%UgjoGi?>?KGv_pJFnoqV_bj8L~bZ89T1nFfJZn+$<4|Nv5G8b z7q;}tc7%>t6jP3nJ5+oSWh@A_caVFs9v#^_uG5EWluZ}qu1VKWb#Pa(Z;{6NRNYBO zEk=~YV7(8`HbI@_II*j|GE2UA+9lYDktOHeXr0qy*-Nr+#erV42;1>`B2kQfc0t$y zb10udd5{PW`w9|7eD3&!Id!9p0=&-WLo<*Ry$v!XO93SlIQy@V!kv_>bgXu;wmMXU zxwZQFE%Di;q@=VHCqa!5^2=?+jzD0u=DBho+u=drq8idF}wG>)2G>&KpJSO@f1xuaNjiulae3XQ{MD&g9bhZOy z_g)}1WC<{f^8C11&=&Sykc-!X8yzox@B2=u(9B8RmVxxhREmFSeKq2C;Q6|tN+9e` zA?}{L_y9dFvT*q!Y^~VE?A0}l7o7X8KC@2`(8xkZMS;sx0tf&PA3^08D!it>@Q#-ONxKMrFf1~~d zP)+(ie#Ee8uW;OoD$#4ekJEtijzZ+Yu!qR=IC~aVb?Wi4Q(xQsN)J4Lbeiwh8)>iO zAns7xt*R@dlnE{Yo&}Ozz<`3KN{safSZTI4CqmxDQII zLOdqIM?oy-5T#MMyFI_VlH-w%d3Y6~WvFC>W&T3!c_CY)1a1qxeTw7AX*w7>ILoC` zku~RcXsj*1E#DN9w=nT1-ywEaJu(H3wLJWBbpDK5N-iMQ!3eN8tlnHbQq3{m{_StL zuG$8hLE2pc3RNMOZadn`Jil^N`&htDHM%RHhWFP`9wD8jLr@ao?7DFVs$dpfN2|^? z>aC^Pg#a=E8^zs4*MnFzc&I(p5^D0QIm;5HU{ibTpuCcH;^9?*jNo8H#sUXW5pOvF z!2kr|KNS*zLjat+7UG%y5*v8-V}QSb$~-k7JD=GJg8IRhDqjjH9U^!WK7SaU^#S$6 z+Gbh6eYXy9(g_M&-`4K~k;EyJ>_3<(AXPA{W{=>WrKZkdJ8ZBo8NLJvQ%;aQ3@Z={ zQVxyjkZJkM`^Z-5LEX_ms4#OD4nDqU`cGIW!9PD_0L?=zn{`#3>ne{^RW@14nx+4vUGuk;UUrXc)7FZHV=_@$(=WcAQSj`pAYV(rOvsmaT z;J@|^hohhvBsoC(EIYux0SjXmeD!l=L2wc%KM(+a<3WXlB(NPm_UGLQdP!wvK`$tp zrXk?erL*&X5#_O}=M#I%Iy-oO zgw?)vAOJhz6tpq}qT0t!?hBHG13HPzHmU=t*Lw5l($y|=Z3fEw@2g2*Dko)*>A?R{yl^)LtJ#O6l*PA1JFrm}e8suO5w2IV7Ldzby5Ke`t8HgfC}b@oQ(#)#;V z>sSom-pU?(Iiw&+mpcxshFCiK3HuiLD9ZS*RV-e4b$#s~`g=~fmnFX17|A0SjM=iz zgE#|9`tn!OvZ8kngV^@|PR9*q!mJxioinBAS~uc5a9zciA1i!yQ}A_5C=T|wOq><; z(h-Hux8W0p7l`ndL#;1-JKf1yHooJsT;k8*4yvl4mIk9LLuc#}GvjgTTiKLD`271m z;(X512a#`7GF3;l*S}mky-cyT`FQ_#tecgg{jD@2Vrx}1p+mokmEJbFh^3i~-hx7ip z1kzTn`I@<(+31=^m!N0M3U-Ht`Vp4354#4)^G^JjX>HReSfqC7DT@v=e%hdsG(VKM zo7^ozcI-^#RT<-xEF7K(F#&9zc`YL?`V}36=ND7l6KLMSwHx*W9JO%VfU%^&u3dvr zWS%Y)X59U}jdsLz#(?~Lq=P|Uf__Q1LS`3xt>o*LoKCLQb@5zLq*I@w{_!P}T(+Zd8AQ_D48pS@;bGZZOtmBX%B zYhW;pZ?s-H2$`45`1VQCbNzT{1H9&2=-w@EVBXpsOa82gj~JMId`ySQ0*x}5p}^XN z8fIp@mE1LVl8cex@7L1{bH?;VI=M!}>mgv$%7x}`GurNI(bc_spfo!5hEkoKD@B-9 z$@cg$(Gund#^@?jgc#bh(+l;hxYLm zLuyvmPIVgAYsxosr6W&kX_$K>s}L`r-etA<}J2jB3oB zKr-#wK%un%=N(7#Se3x29Tl*F;Uj-+;1aP*b6=$0{m30ICH#A6(b!{lbam4Gc4nz@ z@7cne0pwtuYC601lrA752|r*kE{+4$|F-x7N2Dy{zT-7bkE}rl6%$O zd3&;li3ZYTzt%m{&OYXq+f#(Wga^m0=Mb)_PoZjMoO1>h-%=6APC<)-2aJY@B@E+G z5B$tqy*R87BI2{Um`^cEfK-unj;gR3M>P(+ zYVv=Xngh?T?PY-8LVYTm8cf#B_I>up{h?0Kjw6>|it{{6+wCm~2oU+KQREVUKTUpE zw?}D3K1E-yS^X6!n0qI}7Sr_@(HQINk5tSUaGgX&8Ns2dBo4uG_c4rayOuW#d@+ za$d}E$seDAHJuwnF?0Wr)KL_V*j0AmOj?{&)dM&--?qeB%3zz zUa_mZWKoJ+7oQI;IH7~71{Dra8?BID-J7X5Epu*VOX?LXH%jBq1NzkYw`GsnO!ZsU z9cBFbrmbt&s}%d7rT4S@-^yo|1vD2)pWbt(y@*uy=dG&RE%%>#Fz286QJbT=IKm;f zpdAq9(1ms5#yrL}kIm5q&5H#{ZJoLMCC&ctnb#hfKZ;GTZqiST&(`k$ntQfKr6dS1 z>3)xyoT4?IVfJiyWlyZ#+{mGoTh<@^`{D`Z_|$j_)GBZxAh`9nDvP!g!?_G4-q-idCJKXOhxz6E1t ztQlCD(;4;%Mo!J0+V=TTxm&&IwiEJLc`Pwv_ro*^ZDq$MlBnW|%y~rxExv08?(g_R z0tGvXGP3Q;8qx|Pha4w=L>0CB%TSv$332QMlk8q7OYTl5Feh=#cG1<2*GG4)fvx1X z+RyaeB1---F;-^ixUBy<{w3qhwDOuJ_uso5XJPjiw2kS`jx|tw@Pq^pH z^N@=Dwip@P?rWyK(lEhQAIE~P=k&ZD+|ZtO!gczTX6(K`z0@2>EVLsg&tfy2cQnZ; zDFK%pwHPWu?+_e)ExYejcT}OP{4#rKiC!!1=GM~p#lv5eH(SK071tBfmbW#<4;nX> zC+{w{f_sXaT2AU`3Yndce?N-8=`e11D)vhF?Q3|Lk$Nt3``tcDQ%AqXkA*I(T&EL2 
 [base85-encoded GIT binary patch data for docs/kernel/trace/img.png omitted]

literal 0
HcmV?d00001

diff --git a/docs/kernel/trace/img_1.png b/docs/kernel/trace/img_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..fc46be5d2db7d7dcac4a8b21f449252cc4f8cc32
GIT binary patch
literal 66484

 [base85-encoded GIT binary patch data for docs/kernel/trace/img_1.png omitted]

literal 0
HcmV?d00001

diff --git a/docs/kernel/trace/index.rst b/docs/kernel/trace/index.rst
new file mode 100644
index 000000000..23b4c8fa2
--- /dev/null
+++ b/docs/kernel/trace/index.rst
@@ -0,0 +1,11 @@
+Kernel tracing mechanisms
+====================================
+
+ The kernel tracing machinery is built from several features, such as kprobe/uprobe/tracepoint/ftrace, together with eBPF for extending kernel observability. The kernel currently supports kprobe and eBPF; this chapter introduces these two mechanisms.
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Contents
+
+   eBPF
+   kprobe

diff --git a/docs/kernel/trace/kprobe.md b/docs/kernel/trace/kprobe.md
new file mode 100644
index 000000000..55efd0271
--- /dev/null
+++ b/docs/kernel/trace/kprobe.md
@@ -0,0 +1,100 @@
+# kprobe
+
+## Overview
+
+Linux kprobes is a lightweight kernel debugging technique designed by kernel developers specifically to make it easy to trace the execution state of kernel functions. Using kprobes, a kernel developer can dynamically insert probe points into most kernel functions and collect the debugging state they need, while leaving the kernel's original execution flow essentially untouched.
+
+kprobes depends on architecture-specific hardware support, mainly the CPU's exception handling and single-step debugging facilities: the former traps the execution flow into the user-registered callbacks, while the latter single-steps the probed instruction. Note that on some architectures the hardware has no single-step mode; this can be worked around with software emulation (riscv, for example).
+
+
+
+## How kprobe works
+
+xxx
+
+
+
+1. When a kprobe is registered, each registered kprobe corresponds to one kprobe struct, which records the location of the probe point and the instruction originally stored there.
+2. The probe point is overwritten with an exception-raising instruction, so that when the CPU reaches the probe point it traps into an exception. On x86_64 this instruction is int3 (or jmp if the kprobe has been optimized).
+3. When that exception instruction executes, the system checks whether the exception was installed by a kprobe. If so, the kprobe's pre_handler runs; then, using the CPU's single-step facility, the relevant registers are set up, the next instruction is set to the original instruction from the insertion point, and execution returns from the exception.
+4. The exception is taken again. Because the single-step registers were configured in the previous step, the original instruction traps as soon as it has executed; at this point single-step is cleared, post_handler runs, and execution returns safely from the exception.
+5. When the kprobe is unregistered, the original instruction at the probe point is restored.
+
+
+
+The kernel currently supports both x86 and riscv64. Because riscv64 has no single-step execution mode, we emulate it with the break exception: when saving the probed instruction we append an extra break instruction, so that on riscv64 another break exception is triggered right after the original instruction finishes executing.
+
+## kprobe interface
+
+```rust
+pub fn register_kprobe(kprobe_info: KprobeInfo) -> Result;
+pub fn unregister_kprobe(kprobe: LockKprobe) -> Result<(), SystemError>;
+
+impl KprobeBasic {
+    pub fn call_pre_handler(&self, trap_frame: &dyn ProbeArgs)
+    pub fn call_post_handler(&self, trap_frame: &dyn ProbeArgs)
+    pub fn call_fault_handler(&self, trap_frame: &dyn ProbeArgs)
+    pub fn call_event_callback(&self, trap_frame: &dyn ProbeArgs)
+    pub fn update_event_callback(&mut self, callback: Box)
+    pub fn disable(&mut self)
+    pub fn enable(&mut self)
+    pub fn is_enabled(&self) -> bool
+    pub fn symbol(&self) -> Option<&str>
+}
+```
+
+- `call_pre_handler` invokes the user-defined callback before the probed instruction is executed
+- `call_post_handler` invokes the user-defined callback after the probed instruction has been single-stepped
+- `call_fault_handler` is invoked when either of the two callbacks above fails
+- `call_event_callback` invokes the eBPF-related callback; like `call_post_handler`, it is normally called after the probed instruction has been single-stepped
+- `update_event_callback` replaces the event callback at runtime
+- `disable` and `enable` switch the kprobe off and on dynamically; after `disable` is called, the callbacks are no longer executed when the kprobe fires
+- `symbol` returns the name of the probed function
+
+
+
+## Code example
+
+```rust
+#[inline(never)]
+fn detect_func(x: usize, y: usize) -> usize {
+    let hart = 0;
+    println!("detect_func: hart_id: {}, x: {}, y:{}", hart, x, y);
+    hart
+}
+fn pre_handler(regs: &dyn ProbeArgs) {
+    let pt_regs = regs.as_any().downcast_ref::().unwrap();
+    println!(
+        "call pre_handler, the sp is {:#x}",
+        pt_regs as *const _ as usize
+    );
+}
+fn post_handler(regs: &dyn ProbeArgs) {
+    let pt_regs = regs.as_any().downcast_ref::().unwrap();
+    println!(
+        "call post_handler, the sp is {:#x}",
+        pt_regs as *const _ as usize
+    );
+}
+fn fault_handler(regs: &dyn ProbeArgs) {
+    let pt_regs = regs.as_any().downcast_ref::().unwrap();
+    println!(
+        "call fault_handler, the sp is {:#x}",
+        pt_regs as *const _ as usize
+    );
+}
+
+let kprobe_info = KprobeInfo {
+    pre_handler,
+    post_handler,
+    fault_handler: Some(fault_handler),
+    event_callback: None,
+    symbol: None,
+    addr: Some(detect_func as usize),
+    offset: 0,
+    enable: true,
+};
+let kprobe = register_kprobe(kprobe_info).unwrap();
+unregister_kprobe(kprobe).unwrap();
+```
+
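The `disable`/`enable` pair described in the interface section can be driven from the same call site that registers the probe. A minimal sketch of that flow, reusing the `kprobe_info` from the example above; the `write()`/`read()` guard calls around the returned `LockKprobe` are an assumption for illustration, not something this series shows:

```rust
// Sketch only: the exact locking API around LockKprobe is assumed.
let kprobe = register_kprobe(kprobe_info).unwrap();

// Temporarily silence the probe: the probe point stays patched,
// but the pre/post handlers stop running when it is hit.
kprobe.write().disable();
assert!(!kprobe.read().is_enabled());

// Turn it back on; the next call to detect_func() fires the handlers again.
kprobe.write().enable();

unregister_kprobe(kprobe).unwrap();
```
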
From b453b471ef7dbdeeb6ff3714b75aca20877615e9 Mon Sep 17 00:00:00 2001
From: Godones <1925466036@qq.com>
Date: Tue, 24 Sep 2024 11:18:05 +0800
Subject: [PATCH 06/10] fix: remove bad code

---
 kernel/src/bpf/helper/mod.rs    |  4 +-
 kernel/src/bpf/map/array_map.rs | 69 ++++++++++++++-------------------
 kernel/src/bpf/map/hash_map.rs  | 43 +++++++++-----------
 kernel/src/bpf/map/lru.rs       | 38 ++++++++++----------
 kernel/src/bpf/map/mod.rs       | 46 +++++++++-------------
 kernel/src/bpf/map/queue.rs     | 37 +++++++++---------
 kernel/src/bpf/prog/verifier.rs |  3 +-
 kernel/src/perf/kprobe.rs       |  4 ++
 kernel/src/perf/mod.rs          | 14 ++++---
 9 files changed, 115 insertions(+), 143 deletions(-)

diff --git a/kernel/src/bpf/helper/mod.rs b/kernel/src/bpf/helper/mod.rs
index 8e3751a77..37b896b5a 100644
--- a/kernel/src/bpf/helper/mod.rs
+++ b/kernel/src/bpf/helper/mod.rs
@@ -1,7 +1,7 @@
 mod print;
 use crate::bpf::helper::print::trace_printf;
+use crate::bpf::map::PerCpuInfo;
 use crate::bpf::map::{BpfCallBackFn, BpfMap};
-use crate::bpf::map::{PerCpuInfo, PerCpuInfoImpl};
 use crate::include::bindings::linux_bpf::BPF_F_CURRENT_CPU;
 use crate::libs::lazy_init::Lazy;
 use alloc::{collections::BTreeMap, sync::Arc};
@@ -75,7 +75,7 @@ pub fn perf_event_output(
     let index = flags as u32;
     let flags = (flags >> 32) as u32;
     let key = if index == BPF_F_CURRENT_CPU as u32 {
-        let cpu_id = PerCpuInfoImpl::cpu_id();
+        let cpu_id = PerCpuInfo::cpu_id();
         cpu_id
     } else {
         index
diff --git a/kernel/src/bpf/map/array_map.rs b/kernel/src/bpf/map/array_map.rs
index b783a6adc..74cefa5d4 100644
--- a/kernel/src/bpf/map/array_map.rs
+++ b/kernel/src/bpf/map/array_map.rs
@@ -64,9 +64,8 @@ impl IndexMut for ArrayMapData {
     }
 }
 
-impl TryFrom<&BpfMapMeta> for ArrayMap {
-    type Error = SystemError;
-    fn try_from(attr: &BpfMapMeta) -> Result {
+impl ArrayMap {
+    pub fn new(attr: &BpfMapMeta) -> Result {
         if attr.value_size == 0 || attr.max_entries == 0 || attr.key_size != 4 {
             return Err(SystemError::EINVAL);
         }
@@ -153,20 +152,19 @@ impl BpfMapCommonOps for ArrayMap {
         info!("fake freeze done for ArrayMap");
         Ok(())
     }
-    fn first_value_ptr(&self) -> *const u8 {
-        self.data.data.as_ptr()
+    fn first_value_ptr(&self) -> Result<*const u8> {
+        Ok(self.data.data.as_ptr())
     }
 }
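For call sites, the visible effect of this hunk is that map construction moves from the `TryFrom` conversion trait to an inherent constructor, and the base-pointer accessor becomes fallible. A hedged caller-side sketch of the adaptation; the `meta` value and the `?` error plumbing are assumptions, not part of this diff:

```rust
// Before this patch: ArrayMap::try_from(&meta)? plus an infallible raw pointer.
// After this patch:  a plain constructor plus a Result-returning accessor.
let map = ArrayMap::new(&meta)?;              // meta: a BpfMapMeta describing the map
let base: *const u8 = map.first_value_ptr()?; // errors are now propagated as a Result
```
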
/// /// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERCPU_ARRAY/ -pub struct PerCpuArrayMap { +pub struct PerCpuArrayMap { data: Vec, - _phantom: core::marker::PhantomData, } -impl Debug for PerCpuArrayMap { +impl Debug for PerCpuArrayMap { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { f.debug_struct("PerCpuArrayMap") .field("data", &self.data) @@ -174,37 +172,33 @@ impl Debug for PerCpuArrayMap { } } -impl TryFrom<&BpfMapMeta> for PerCpuArrayMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { - let num_cpus = T::num_cpus(); +impl PerCpuArrayMap { + pub fn new(attr: &BpfMapMeta) -> Result { + let num_cpus = PerCpuInfo::num_cpus(); let mut data = Vec::with_capacity(num_cpus as usize); for _ in 0..num_cpus { - let array_map = ArrayMap::try_from(attr)?; + let array_map = ArrayMap::new(attr)?; data.push(array_map); } - Ok(PerCpuArrayMap { - data, - _phantom: core::marker::PhantomData, - }) + Ok(PerCpuArrayMap { data }) } } -impl BpfMapCommonOps for PerCpuArrayMap { +impl BpfMapCommonOps for PerCpuArrayMap { fn lookup_elem(&mut self, key: &[u8]) -> Result> { - let cpu_id = T::cpu_id(); + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].lookup_elem(key) } fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { - let cpu_id = T::cpu_id(); + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].update_elem(key, value, flags) } fn delete_elem(&mut self, key: &[u8]) -> Result<()> { - let cpu_id = T::cpu_id(); + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].delete_elem(key) } fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { - let cpu_id = T::cpu_id(); + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].for_each_elem(cb, ctx, flags) } fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { @@ -214,23 +208,22 @@ impl BpfMapCommonOps for PerCpuArrayMap { self.data[cpu as usize].lookup_elem(key) } fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { - let cpu_id = T::cpu_id(); + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].get_next_key(key, next_key) } - fn first_value_ptr(&self) -> *const u8 { - let cpu_id = T::cpu_id(); + fn first_value_ptr(&self) -> Result<*const u8> { + let cpu_id = PerCpuInfo::cpu_id(); self.data[cpu_id as usize].first_value_ptr() } } /// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERF_EVENT_ARRAY/ -pub struct PerfEventArrayMap { +pub struct PerfEventArrayMap { // The value is the file descriptor of the perf event. 
fds: ArrayMapData, - _phantom: core::marker::PhantomData, } -impl Debug for PerfEventArrayMap { +impl Debug for PerfEventArrayMap { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { f.debug_struct("PerfEventArrayMap") .field("fds", &self.fds) @@ -238,22 +231,18 @@ impl Debug for PerfEventArrayMap { } } -impl TryFrom<&BpfMapMeta> for PerfEventArrayMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { - let num_cpus = T::num_cpus(); +impl PerfEventArrayMap { + pub fn new(attr: &BpfMapMeta) -> Result { + let num_cpus = PerCpuInfo::num_cpus(); if attr.key_size != 4 || attr.value_size != 4 || attr.max_entries != num_cpus { return Err(SystemError::EINVAL); } let fds = ArrayMapData::new(4, num_cpus); - Ok(PerfEventArrayMap { - fds, - _phantom: core::marker::PhantomData, - }) + Ok(PerfEventArrayMap { fds }) } } -impl BpfMapCommonOps for PerfEventArrayMap { +impl BpfMapCommonOps for PerfEventArrayMap { fn lookup_elem(&mut self, key: &[u8]) -> Result> { let cpu_id = u32::from_ne_bytes(key.try_into().unwrap()); let value = self.fds.index(cpu_id); @@ -273,7 +262,7 @@ impl BpfMapCommonOps for PerfEventArrayMap { } fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, _flags: u64) -> Result { let mut total_used = 0; - for i in 0..T::num_cpus() { + for i in 0..PerCpuInfo::num_cpus() { let key = i.to_ne_bytes(); let value = self.fds.index(i); total_used += 1; @@ -287,7 +276,7 @@ impl BpfMapCommonOps for PerfEventArrayMap { fn lookup_and_delete_elem(&mut self, _key: &[u8], _value: &mut [u8]) -> Result<()> { Err(SystemError::EINVAL) } - fn first_value_ptr(&self) -> *const u8 { - self.fds.data.as_ptr() + fn first_value_ptr(&self) -> Result<*const u8> { + Ok(self.fds.data.as_ptr()) } } diff --git a/kernel/src/bpf/map/hash_map.rs b/kernel/src/bpf/map/hash_map.rs index 13a18b2ce..0e5abb0e4 100644 --- a/kernel/src/bpf/map/hash_map.rs +++ b/kernel/src/bpf/map/hash_map.rs @@ -19,9 +19,8 @@ pub struct BpfHashMap { data: BTreeMap, } -impl TryFrom<&BpfMapMeta> for BpfHashMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { +impl BpfHashMap { + pub fn new(attr: &BpfMapMeta) -> Result { if attr.value_size == 0 || attr.max_entries == 0 { return Err(SystemError::EINVAL); } @@ -97,57 +96,51 @@ impl BpfMapCommonOps for BpfHashMap { /// This is the per-CPU variant of the [BpfHashMap] map type. 
/// /// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_PERCPU_HASH/ -pub struct PerCpuHashMap { +pub struct PerCpuHashMap { maps: Vec, - _phantom: core::marker::PhantomData, } -impl Debug for PerCpuHashMap { +impl Debug for PerCpuHashMap { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PerCpuHashMap") .field("maps", &self.maps) .finish() } } -impl TryFrom<&BpfMapMeta> for PerCpuHashMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { - let num_cpus = T::num_cpus(); +impl PerCpuHashMap { + pub fn new(attr: &BpfMapMeta) -> Result { + let num_cpus = PerCpuInfo::num_cpus(); let mut data = Vec::with_capacity(num_cpus as usize); for _ in 0..num_cpus { - let array_map = BpfHashMap::try_from(attr)?; + let array_map = BpfHashMap::new(attr)?; data.push(array_map); } - Ok(PerCpuHashMap { - maps: data, - _phantom: core::marker::PhantomData, - }) + Ok(PerCpuHashMap { maps: data }) } } - -impl BpfMapCommonOps for PerCpuHashMap { +impl BpfMapCommonOps for PerCpuHashMap { fn lookup_elem(&mut self, key: &[u8]) -> Result> { - self.maps[T::cpu_id() as usize].lookup_elem(key) + self.maps[PerCpuInfo::cpu_id() as usize].lookup_elem(key) } fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { - self.maps[T::cpu_id() as usize].update_elem(key, value, flags) + self.maps[PerCpuInfo::cpu_id() as usize].update_elem(key, value, flags) } fn delete_elem(&mut self, key: &[u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].delete_elem(key) + self.maps[PerCpuInfo::cpu_id() as usize].delete_elem(key) } fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { - self.maps[T::cpu_id() as usize].for_each_elem(cb, ctx, flags) + self.maps[PerCpuInfo::cpu_id() as usize].for_each_elem(cb, ctx, flags) } fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].lookup_and_delete_elem(key, value) + self.maps[PerCpuInfo::cpu_id() as usize].lookup_and_delete_elem(key, value) } fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result> { self.maps[cpu as usize].lookup_elem(key) } fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].get_next_key(key, next_key) + self.maps[PerCpuInfo::cpu_id() as usize].get_next_key(key, next_key) } - fn first_value_ptr(&self) -> *const u8 { - self.maps[T::cpu_id() as usize].first_value_ptr() + fn first_value_ptr(&self) -> Result<*const u8> { + self.maps[PerCpuInfo::cpu_id() as usize].first_value_ptr() } } diff --git a/kernel/src/bpf/map/lru.rs b/kernel/src/bpf/map/lru.rs index 5336bb840..7ebb9fa16 100644 --- a/kernel/src/bpf/map/lru.rs +++ b/kernel/src/bpf/map/lru.rs @@ -16,9 +16,8 @@ pub struct LruMap { data: LruCache, } -impl TryFrom<&BpfMapMeta> for LruMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { +impl LruMap { + pub fn new(attr: &BpfMapMeta) -> Result { if attr.value_size == 0 || attr.max_entries == 0 { return Err(SystemError::EINVAL); } @@ -89,12 +88,11 @@ impl BpfMapCommonOps for LruMap { } /// See https://ebpf-docs.dylanreimerink.nl/linux/map-type/BPF_MAP_TYPE_LRU_PERCPU_HASH/ -pub struct PerCpuLruMap { +pub struct PerCpuLruMap { maps: Vec, - _phantom: core::marker::PhantomData, } -impl Debug for PerCpuLruMap { +impl Debug for PerCpuLruMap { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("PerCpuLruMap") .field("maps", &self.maps) @@ -102,42 +100,38 @@ impl 
Debug for PerCpuLruMap { } } -impl TryFrom<&BpfMapMeta> for PerCpuLruMap { - type Error = SystemError; - fn try_from(attr: &BpfMapMeta) -> Result { - let num_cpus = T::num_cpus(); +impl PerCpuLruMap { + pub fn new(attr: &BpfMapMeta) -> Result { + let num_cpus = PerCpuInfo::num_cpus(); let mut data = Vec::with_capacity(num_cpus as usize); for _ in 0..num_cpus { - let array_map = LruMap::try_from(attr)?; + let array_map = LruMap::new(attr)?; data.push(array_map); } - Ok(PerCpuLruMap { - maps: data, - _phantom: core::marker::PhantomData, - }) + Ok(PerCpuLruMap { maps: data }) } } -impl BpfMapCommonOps for PerCpuLruMap { +impl BpfMapCommonOps for PerCpuLruMap { fn lookup_elem(&mut self, key: &[u8]) -> Result> { - self.maps[T::cpu_id() as usize].lookup_elem(key) + self.maps[PerCpuInfo::cpu_id() as usize].lookup_elem(key) } fn update_elem(&mut self, key: &[u8], value: &[u8], flags: u64) -> Result<()> { - self.maps[T::cpu_id() as usize].update_elem(key, value, flags) + self.maps[PerCpuInfo::cpu_id() as usize].update_elem(key, value, flags) } fn delete_elem(&mut self, key: &[u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].delete_elem(key) + self.maps[PerCpuInfo::cpu_id() as usize].delete_elem(key) } fn for_each_elem(&mut self, cb: BpfCallBackFn, ctx: *const u8, flags: u64) -> Result { - self.maps[T::cpu_id() as usize].for_each_elem(cb, ctx, flags) + self.maps[PerCpuInfo::cpu_id() as usize].for_each_elem(cb, ctx, flags) } fn lookup_and_delete_elem(&mut self, key: &[u8], value: &mut [u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].lookup_and_delete_elem(key, value) + self.maps[PerCpuInfo::cpu_id() as usize].lookup_and_delete_elem(key, value) } fn lookup_percpu_elem(&mut self, key: &[u8], cpu: u32) -> Result> { self.maps[cpu as usize].lookup_elem(key) } fn get_next_key(&self, key: Option<&[u8]>, next_key: &mut [u8]) -> Result<()> { - self.maps[T::cpu_id() as usize].get_next_key(key, next_key) + self.maps[PerCpuInfo::cpu_id() as usize].get_next_key(key, next_key) } } diff --git a/kernel/src/bpf/map/mod.rs b/kernel/src/bpf/map/mod.rs index 574a71b08..3e08bcce3 100644 --- a/kernel/src/bpf/map/mod.rs +++ b/kernel/src/bpf/map/mod.rs @@ -15,8 +15,6 @@ use crate::include::bindings::linux_bpf::{bpf_attr, bpf_map_type}; use crate::libs::casting::DowncastArc; use crate::libs::spinlock::{SpinLock, SpinLockGuard}; use crate::process::ProcessManager; -use crate::smp::core::smp_get_processor_id; -use crate::smp::cpu::smp_cpu_manager; use crate::syscall::user_access::{UserBufferReader, UserBufferWriter}; use alloc::boxed::Box; use alloc::string::String; @@ -102,8 +100,8 @@ pub trait BpfMapCommonOps: Send + Sync + Debug + CastFromSync { } /// Get the first value pointer. 
- fn first_value_ptr(&self) -> *const u8 { - panic!("value_ptr not implemented") + fn first_value_ptr(&self) -> Result<*const u8> { + Err(SystemError::ENOSYS) } } impl DowncastArc for dyn BpfMapCommonOps { @@ -112,30 +110,22 @@ impl DowncastArc for dyn BpfMapCommonOps { } } -pub struct PerCpuInfoImpl; +pub struct PerCpuInfo; -impl PerCpuInfo for PerCpuInfoImpl { - fn cpu_id() -> u32 { - // let cpu = smp_get_processor_id(); +impl PerCpuInfo { + pub fn cpu_id() -> u32 { + // let cpu = crate::smp::core::smp_get_processor_id(); // log::info!("cpu_id: {:?}", cpu.data()); // cpu.data() 0 } - fn num_cpus() -> u32 { - // let cpus = smp_cpu_manager(); + pub fn num_cpus() -> u32 { + // let cpus = crate::smp::cpu::smp_cpu_manager(); // log::info!("num_cpus: {:?}", cpus.present_cpus_count()); // cpus.present_cpus_count() 1 } } - -pub trait PerCpuInfo: Send + Sync + 'static { - /// Get the CPU ID of the current CPU. - fn cpu_id() -> u32; - /// Get the number of CPUs. - fn num_cpus() -> u32; -} - impl BpfMap { pub fn new(map: Box, meta: BpfMapMeta) -> Self { assert_ne!(meta.key_size, 0); @@ -199,7 +189,7 @@ impl IndexNode for BpfMap { } fn fs(&self) -> Arc { - panic!("BpfMap does not have a filesystem") + todo!("BpfMap does not have a filesystem") } fn as_any_ref(&self) -> &dyn Any { @@ -221,15 +211,15 @@ pub fn bpf_map_create(attr: &bpf_attr) -> Result { info!("The map attr is {:#?}", map_meta); let map: Box = match map_meta.map_type { bpf_map_type::BPF_MAP_TYPE_ARRAY => { - let array_map = ArrayMap::try_from(&map_meta)?; + let array_map = ArrayMap::new(&map_meta)?; Box::new(array_map) } bpf_map_type::BPF_MAP_TYPE_PERCPU_ARRAY => { - let per_cpu_array_map = PerCpuArrayMap::::try_from(&map_meta)?; + let per_cpu_array_map = PerCpuArrayMap::new(&map_meta)?; Box::new(per_cpu_array_map) } bpf_map_type::BPF_MAP_TYPE_PERF_EVENT_ARRAY => { - let perf_event_array_map = PerfEventArrayMap::::try_from(&map_meta)?; + let perf_event_array_map = PerfEventArrayMap::new(&map_meta)?; Box::new(perf_event_array_map) } @@ -240,27 +230,27 @@ pub fn bpf_map_create(attr: &bpf_attr) -> Result { Err(SystemError::EINVAL)? 
} bpf_map_type::BPF_MAP_TYPE_HASH => { - let hash_map = hash_map::BpfHashMap::try_from(&map_meta)?; + let hash_map = hash_map::BpfHashMap::new(&map_meta)?; Box::new(hash_map) } bpf_map_type::BPF_MAP_TYPE_PERCPU_HASH => { - let per_cpu_hash_map = PerCpuHashMap::::try_from(&map_meta)?; + let per_cpu_hash_map = PerCpuHashMap::new(&map_meta)?; Box::new(per_cpu_hash_map) } bpf_map_type::BPF_MAP_TYPE_QUEUE => { - let queue_map = queue::QueueMap::try_from(&map_meta)?; + let queue_map = queue::QueueMap::new(&map_meta)?; Box::new(queue_map) } bpf_map_type::BPF_MAP_TYPE_STACK => { - let stack_map = queue::StackMap::try_from(&map_meta)?; + let stack_map = queue::StackMap::new(&map_meta)?; Box::new(stack_map) } bpf_map_type::BPF_MAP_TYPE_LRU_HASH => { - let lru_hash_map = lru::LruMap::try_from(&map_meta)?; + let lru_hash_map = lru::LruMap::new(&map_meta)?; Box::new(lru_hash_map) } bpf_map_type::BPF_MAP_TYPE_LRU_PERCPU_HASH => { - let lru_per_cpu_hash_map = lru::PerCpuLruMap::::try_from(&map_meta)?; + let lru_per_cpu_hash_map = lru::PerCpuLruMap::new(&map_meta)?; Box::new(lru_per_cpu_hash_map) } _ => { diff --git a/kernel/src/bpf/map/queue.rs b/kernel/src/bpf/map/queue.rs index 3efddd2d1..99a6b1975 100644 --- a/kernel/src/bpf/map/queue.rs +++ b/kernel/src/bpf/map/queue.rs @@ -7,6 +7,16 @@ use core::ops::DerefMut; use system_error::SystemError; type BpfQueueValue = Vec; + +pub trait SpecialMap: Debug + Send + Sync + 'static { + /// Returns the number of elements the queue can hold. + fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()>; + /// Removes the first element and returns it. + fn pop(&mut self) -> Option; + /// Returns the first element without removing it. + fn peek(&self) -> Option<&BpfQueueValue>; +} + /// The queue map type is a generic map type, resembling a FIFO (First-In First-Out) queue. /// /// This map type has no keys, only values. The size and type of the values can be specified by the user @@ -22,24 +32,14 @@ pub struct QueueMap { data: Vec, } -pub trait SpecialMap: Debug + Send + Sync + 'static { - /// Returns the number of elements the queue can hold. - fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()>; - /// Removes the first element and returns it. - fn pop(&mut self) -> Option; - /// Returns the first element without removing it. 
- fn peek(&self) -> Option<&BpfQueueValue>; -} - -impl TryFrom<&BpfMapMeta> for QueueMap { - type Error = SystemError; - fn try_from(value: &BpfMapMeta) -> Result { - if value.value_size == 0 || value.max_entries == 0 || value.key_size != 0 { +impl QueueMap { + pub fn new(attr: &BpfMapMeta) -> Result { + if attr.value_size == 0 || attr.max_entries == 0 || attr.key_size != 0 { return Err(SystemError::EINVAL); } - let data = Vec::with_capacity(value.max_entries as usize); + let data = Vec::with_capacity(attr.max_entries as usize); Ok(Self { - max_entries: value.max_entries, + max_entries: attr.max_entries, data, }) } @@ -74,10 +74,9 @@ impl SpecialMap for QueueMap { #[derive(Debug)] pub struct StackMap(QueueMap); -impl TryFrom<&BpfMapMeta> for StackMap { - type Error = SystemError; - fn try_from(value: &BpfMapMeta) -> Result { - QueueMap::try_from(value).map(StackMap) +impl StackMap { + pub fn new(attr: &BpfMapMeta) -> Result { + QueueMap::new(attr).map(StackMap) } } diff --git a/kernel/src/bpf/prog/verifier.rs b/kernel/src/bpf/prog/verifier.rs index 987423bf1..4429bdf8c 100644 --- a/kernel/src/bpf/prog/verifier.rs +++ b/kernel/src/bpf/prog/verifier.rs @@ -62,7 +62,8 @@ impl<'a> BpfProgVerifier<'a> { .inode() .downcast_arc::() .ok_or(SystemError::EINVAL)?; - let first_value_ptr = bpf_map.inner_map().lock().first_value_ptr() as usize; + let first_value_ptr = + bpf_map.inner_map().lock().first_value_ptr()? as usize; let offset = next_insn.imm as usize; info!( "Relocate for BPF_PSEUDO_MAP_VALUE, instruction index: {}, map_fd: {}", diff --git a/kernel/src/perf/kprobe.rs b/kernel/src/perf/kprobe.rs index 0d88047df..cf97800c0 100644 --- a/kernel/src/perf/kprobe.rs +++ b/kernel/src/perf/kprobe.rs @@ -128,6 +128,10 @@ impl PerfEventOps for KprobePerfEvent { self.kprobe.write().disable(); Ok(()) } + + fn readable(&self) -> bool { + true + } } pub fn perf_event_open_kprobe(args: PerfProbeArgs) -> KprobePerfEvent { diff --git a/kernel/src/perf/mod.rs b/kernel/src/perf/mod.rs index ef1e79bf2..de18c30ea 100644 --- a/kernel/src/perf/mod.rs +++ b/kernel/src/perf/mod.rs @@ -38,18 +38,20 @@ use system_error::SystemError; type Result = core::result::Result; pub trait PerfEventOps: Send + Sync + Debug + CastFromSync + CastFrom + IndexNode { + /// Set the bpf program for the perf event fn set_bpf_prog(&self, _bpf_prog: Arc) -> Result<()> { - panic!("set_bpf_prog not implemented for PerfEvent"); + Err(SystemError::ENOSYS) } + /// Enable the perf event fn enable(&self) -> Result<()> { - panic!("enable not implemented"); + Err(SystemError::ENOSYS) } + /// Disable the perf event fn disable(&self) -> Result<()> { - panic!("disable not implemented"); - } - fn readable(&self) -> bool { - panic!("readable not implemented"); + Err(SystemError::ENOSYS) } + /// Whether the perf event is readable + fn readable(&self) -> bool; } #[derive(Debug)] From 134e89be032ca1c6886bb7a8aaa615ce07217d67 Mon Sep 17 00:00:00 2001 From: Godones <1925466036@qq.com> Date: Tue, 24 Sep 2024 23:47:38 +0800 Subject: [PATCH 07/10] add docs and update dep --- docs/kernel/trace/eBPF.md | 10 +++- .../kernel/trace/{img_1.png => ebpf_flow.png} | Bin docs/kernel/trace/kprobe.md | 53 ++---------------- .../kernel/trace/{img.png => kprobe_flow.png} | Bin kernel/crates/rbpf/README.md | 4 ++ kernel/src/bpf/helper/consts.rs | 11 ++++ kernel/src/bpf/helper/mod.rs | 34 +++++++---- kernel/src/bpf/map/lru.rs | 7 ++- kernel/src/bpf/map/queue.rs | 9 ++- kernel/src/debug/kprobe/test.rs | 19 ++++--- kernel/src/mm/ucontext.rs | 18 ------ kernel/src/perf/util.rs | 7 
+++ user/apps/syscall_ebpf/.vim/coc-settings.json | 3 - .../syscall_ebpf-common/Cargo.toml | 2 +- .../syscall_ebpf-ebpf/rust-toolchain.toml | 2 +- .../apps/syscall_ebpf/syscall_ebpf/Cargo.toml | 4 +- user/apps/test_ebpf/Cargo.toml | 4 +- 17 files changed, 87 insertions(+), 100 deletions(-) rename docs/kernel/trace/{img_1.png => ebpf_flow.png} (100%) rename docs/kernel/trace/{img.png => kprobe_flow.png} (100%) create mode 100644 kernel/src/bpf/helper/consts.rs delete mode 100644 user/apps/syscall_ebpf/.vim/coc-settings.json diff --git a/docs/kernel/trace/eBPF.md b/docs/kernel/trace/eBPF.md index 00c37984f..b9a1bf1a8 100644 --- a/docs/kernel/trace/eBPF.md +++ b/docs/kernel/trace/eBPF.md @@ -1,5 +1,9 @@ # eBPF +> 作者: 陈林峰 +> +> Email: chenlinfeng25@outlook.com + ## 概述 eBPF 是一项革命性的技术,起源于 Linux 内核,它可以在特权上下文中(如操作系统内核)运行沙盒程序。它用于安全有效地扩展内核的功能,而无需通过更改内核源代码或加载内核模块的方式来实现。 @@ -14,7 +18,7 @@ eBPF 从根本上改变了这个方式。通过允许在操作系统中运行沙 ## eBPF的运行流程 -![image-20240909165945192](./img_1.png) +![image-20240909165945192](./ebpf_flow.png) 如图所示,eBPF程序的运行过程分为三个主要步骤: @@ -85,8 +89,8 @@ async fn main() -> Result<(), Box> { ``` [dependencies] -aya = { git = "https://github.com/os-module/tiny-aya.git" } -aya-log = { git = "https://github.com/os-module/tiny-aya.git" } +aya = { git = "https://github.com/DragonOS-Community/tiny-aya.git" } +aya-log = { git = "https://github.com/DragonOS-Community/tiny-aya.git" } ``` 只需要稍加修改,就可以利用Aya现有的工具完成eBPF程序的实现。 diff --git a/docs/kernel/trace/img_1.png b/docs/kernel/trace/ebpf_flow.png similarity index 100% rename from docs/kernel/trace/img_1.png rename to docs/kernel/trace/ebpf_flow.png diff --git a/docs/kernel/trace/kprobe.md b/docs/kernel/trace/kprobe.md index 55efd0271..53bd3aec8 100644 --- a/docs/kernel/trace/kprobe.md +++ b/docs/kernel/trace/kprobe.md @@ -1,5 +1,9 @@ # kprobe +> 作者: 陈林峰 +> +> Email: chenlinfeng25@outlook.com + ## 概述 Linux kprobes调试技术是内核开发者们专门为了便于跟踪内核函数执行状态所设计的一种轻量级内核调试技术。利用kprobes技术,内核开发人员可以在内核的绝大多数指定函数中动态的插入探测点来收集所需的调试状态信息而基本不影响内核原有的执行流程。 @@ -10,7 +14,7 @@ kprobes技术依赖硬件架构相关的支持,主要包括CPU的异常处理 ## kprobe工作流程 -xxx +xxx @@ -51,50 +55,3 @@ impl KprobeBasic { - `disable` 和 `enable` 用于动态关闭kprobe,在`disable`调用后,kprobe被触发时不执行回调函数 - `symbol` 返回探测点的函数名称 - - -## 代码示例 - -```rust -#[inline(never)] -fn detect_func(x: usize, y: usize) -> usize { - let hart = 0; - println!("detect_func: hart_id: {}, x: {}, y:{}", hart, x, y); - hart -} -fn pre_handler(regs: &dyn ProbeArgs) { - let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( - "call pre_handler, the sp is {:#x}", - pt_regs as *const _ as usize - ); -} -fn post_handler(regs: &dyn ProbeArgs) { - let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( - "call post_handler, the sp is {:#x}", - pt_regs as *const _ as usize - ); -} -fn fault_handler(regs: &dyn ProbeArgs) { - let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( - "call fault_handler, the sp is {:#x}", - pt_regs as *const _ as usize - ); -} - -let kprobe_info = KprobeInfo { - pre_handler, - post_handler, - fault_handler: Some(fault_handler), - event_callback: None, - symbol: None, - addr: Some(detect_func as usize), - offset: 0, - enable: true, - }; -let kprobe = register_kprobe(kprobe_info).unwrap(); - unregister_kprobe(kprobe).unwrap(); -``` - diff --git a/docs/kernel/trace/img.png b/docs/kernel/trace/kprobe_flow.png similarity index 100% rename from docs/kernel/trace/img.png rename to docs/kernel/trace/kprobe_flow.png diff --git a/kernel/crates/rbpf/README.md b/kernel/crates/rbpf/README.md index 13ec5af3e..e2dc8ce7b 100644 --- 
a/kernel/crates/rbpf/README.md +++ b/kernel/crates/rbpf/README.md @@ -710,6 +710,10 @@ See and [LICENSE-MIT](https://github.com/qmonnet/rbpf/blob/main/LICENSE-MIT) for details. +## Version +[The last commit](https://github.com/qmonnet/rbpf/commit/fe7021b07b08a43b836743a77796d07ce1f4902e) + + ## Inspired by * [uBPF](https://github.com/iovisor/ubpf), a C user-space implementation of an diff --git a/kernel/src/bpf/helper/consts.rs b/kernel/src/bpf/helper/consts.rs new file mode 100644 index 000000000..69bce7d61 --- /dev/null +++ b/kernel/src/bpf/helper/consts.rs @@ -0,0 +1,11 @@ +pub const HELPER_MAP_LOOKUP_ELEM: u32 = 1; +pub const HELPER_MAP_UPDATE_ELEM: u32 = 2; +pub const HELPER_MAP_DELETE_ELEM: u32 = 3; +pub const HELPER_MAP_FOR_EACH_ELEM: u32 = 164; +pub const HELPER_MAP_LOOKUP_PERCPU_ELEM: u32 = 195; +pub const HELPER_PERF_EVENT_OUTPUT: u32 = 25; +pub const HELPER_BPF_PROBE_READ: u32 = 4; +pub const HELPER_TRACE_PRINTF: u32 = 6; +pub const HELPER_MAP_PUSH_ELEM: u32 = 87; +pub const HELPER_MAP_POP_ELEM: u32 = 88; +pub const HELPER_MAP_PEEK_ELEM: u32 = 89; diff --git a/kernel/src/bpf/helper/mod.rs b/kernel/src/bpf/helper/mod.rs index 37b896b5a..457047324 100644 --- a/kernel/src/bpf/helper/mod.rs +++ b/kernel/src/bpf/helper/mod.rs @@ -1,4 +1,6 @@ +mod consts; mod print; + use crate::bpf::helper::print::trace_printf; use crate::bpf::map::PerCpuInfo; use crate::bpf::map::{BpfCallBackFn, BpfMap}; @@ -310,27 +312,37 @@ pub static BPF_HELPER_FUN_SET: Lazy> = Lazy::new() /// Initialize the helper functions. pub fn init_helper_functions() { + use consts::*; let mut map = BTreeMap::new(); unsafe { // Map helpers::Generic map helpers - map.insert(1, define_func!(raw_map_lookup_elem)); - map.insert(2, define_func!(raw_map_update_elem)); - map.insert(3, define_func!(raw_map_delete_elem)); - map.insert(164, define_func!(raw_map_for_each_elem)); - map.insert(195, define_func!(raw_map_lookup_percpu_elem)); + map.insert(HELPER_MAP_LOOKUP_ELEM, define_func!(raw_map_lookup_elem)); + map.insert(HELPER_MAP_UPDATE_ELEM, define_func!(raw_map_update_elem)); + map.insert(HELPER_MAP_DELETE_ELEM, define_func!(raw_map_delete_elem)); + map.insert( + HELPER_MAP_FOR_EACH_ELEM, + define_func!(raw_map_for_each_elem), + ); + map.insert( + HELPER_MAP_LOOKUP_PERCPU_ELEM, + define_func!(raw_map_lookup_percpu_elem), + ); // map.insert(93,define_func!(raw_bpf_spin_lock); // map.insert(94,define_func!(raw_bpf_spin_unlock); // Map helpers::Perf event array helpers - map.insert(25, define_func!(raw_perf_event_output)); + map.insert( + HELPER_PERF_EVENT_OUTPUT, + define_func!(raw_perf_event_output), + ); // Probe and trace helpers::Memory helpers - map.insert(4, define_func!(raw_bpf_probe_read)); + map.insert(HELPER_BPF_PROBE_READ, define_func!(raw_bpf_probe_read)); // Print helpers - map.insert(6, define_func!(trace_printf)); + map.insert(HELPER_TRACE_PRINTF, define_func!(trace_printf)); // Map helpers::Queue and stack helpers - map.insert(87, define_func!(raw_map_push_elem)); - map.insert(88, define_func!(raw_map_pop_elem)); - map.insert(89, define_func!(raw_map_peek_elem)); + map.insert(HELPER_MAP_PUSH_ELEM, define_func!(raw_map_push_elem)); + map.insert(HELPER_MAP_POP_ELEM, define_func!(raw_map_pop_elem)); + map.insert(HELPER_MAP_PEEK_ELEM, define_func!(raw_map_peek_elem)); } BPF_HELPER_FUN_SET.init(map); } diff --git a/kernel/src/bpf/map/lru.rs b/kernel/src/bpf/map/lru.rs index 7ebb9fa16..5c2158df1 100644 --- a/kernel/src/bpf/map/lru.rs +++ b/kernel/src/bpf/map/lru.rs @@ -9,7 +9,12 @@ use system_error::SystemError; type 
BpfHashMapKey = Vec; type BpfHashMapValue = Vec; - +/// This map is the LRU (Least Recently Used) variant of the BPF_MAP_TYPE_HASH. +/// It is a generic map type that stores a fixed maximum number of key/value pairs. +/// When the map starts to get at capacity, the approximately least recently +/// used elements is removed to make room for new elements. +/// +/// See https://docs.ebpf.io/linux/map-type/BPF_MAP_TYPE_LRU_HASH/ #[derive(Debug)] pub struct LruMap { max_entries: u32, diff --git a/kernel/src/bpf/map/queue.rs b/kernel/src/bpf/map/queue.rs index 99a6b1975..817f2089e 100644 --- a/kernel/src/bpf/map/queue.rs +++ b/kernel/src/bpf/map/queue.rs @@ -7,7 +7,14 @@ use core::ops::DerefMut; use system_error::SystemError; type BpfQueueValue = Vec; - +/// BPF_MAP_TYPE_QUEUE provides FIFO storage and BPF_MAP_TYPE_STACK provides LIFO storage for BPF programs. +/// These maps support peek, pop and push operations that are exposed to BPF programs through the respective helpers. +/// These operations are exposed to userspace applications using the existing bpf syscall in the following way: +/// - `BPF_MAP_LOOKUP_ELEM` -> `peek` +/// - `BPF_MAP_UPDATE_ELEM` -> `push` +/// - `BPF_MAP_LOOKUP_AND_DELETE_ELEM ` -> `pop` +/// +/// See https://docs.kernel.org/bpf/map_queue_stack.html pub trait SpecialMap: Debug + Send + Sync + 'static { /// Returns the number of elements the queue can hold. fn push(&mut self, value: BpfQueueValue, flags: BpfMapUpdateElemFlags) -> Result<()>; diff --git a/kernel/src/debug/kprobe/test.rs b/kernel/src/debug/kprobe/test.rs index f3e04f496..cef74ad30 100644 --- a/kernel/src/debug/kprobe/test.rs +++ b/kernel/src/debug/kprobe/test.rs @@ -2,17 +2,18 @@ use crate::arch::interrupt::TrapFrame; use crate::debug::kprobe::{register_kprobe, unregister_kprobe, KprobeInfo}; use alloc::string::ToString; use kprobe::ProbeArgs; +use log::info; #[inline(never)] fn detect_func(x: usize, y: usize) -> usize { let hart = 0; - println!("detect_func: hart_id: {}, x: {}, y:{}", hart, x, y); + info!("detect_func: hart_id: {}, x: {}, y:{}", hart, x, y); hart } fn pre_handler(regs: &dyn ProbeArgs) { let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( + info!( "call pre_handler, the sp is {:#x}", pt_regs as *const _ as usize ); @@ -20,7 +21,7 @@ fn pre_handler(regs: &dyn ProbeArgs) { fn post_handler(regs: &dyn ProbeArgs) { let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( + info!( "call post_handler, the sp is {:#x}", pt_regs as *const _ as usize ); @@ -28,14 +29,14 @@ fn post_handler(regs: &dyn ProbeArgs) { fn fault_handler(regs: &dyn ProbeArgs) { let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( + info!( "call fault_handler, the sp is {:#x}", pt_regs as *const _ as usize ); } pub fn kprobe_test() { - println!("kprobe test for [detect_func]: {:#x}", detect_func as usize); + info!("kprobe test for [detect_func]: {:#x}", detect_func as usize); let kprobe_info = KprobeInfo { pre_handler, post_handler, @@ -50,7 +51,7 @@ pub fn kprobe_test() { let new_pre_handler = |regs: &dyn ProbeArgs| { let pt_regs = regs.as_any().downcast_ref::().unwrap(); - println!( + info!( "call new pre_handler, the sp is {:#x}", pt_regs as *const _ as usize ); @@ -67,17 +68,17 @@ pub fn kprobe_test() { enable: true, }; let kprobe2 = register_kprobe(kprobe_info).unwrap(); - println!( + info!( "install 2 kprobes at [detect_func]: {:#x}", detect_func as usize ); detect_func(1, 2); unregister_kprobe(kprobe).unwrap(); unregister_kprobe(kprobe2).unwrap(); - println!( + info!( "uninstall 2 
kprobes at [detect_func]: {:#x}", detect_func as usize ); detect_func(1, 2); - println!("kprobe test end"); + info!("kprobe test end"); } diff --git a/kernel/src/mm/ucontext.rs b/kernel/src/mm/ucontext.rs index 318f92729..d9308e369 100644 --- a/kernel/src/mm/ucontext.rs +++ b/kernel/src/mm/ucontext.rs @@ -228,24 +228,6 @@ impl InnerAddressSpace { return self.user_mapper.utable.is_current(); } - pub fn map_file( - &mut self, - start_vaddr: VirtAddr, - len: usize, - prot_flags: ProtFlags, - map_flags: MapFlags, - round_to_min: bool, - allocate_at_once: bool, - ) -> Result { - self.map_anonymous( - start_vaddr, - len, - prot_flags, - map_flags, - round_to_min, - allocate_at_once, - ) - } /// 进行匿名页映射 /// /// ## 参数 diff --git a/kernel/src/perf/util.rs b/kernel/src/perf/util.rs index e2538e6dc..0758cef7e 100644 --- a/kernel/src/perf/util.rs +++ b/kernel/src/perf/util.rs @@ -14,11 +14,18 @@ bitflags! { const PERF_FLAG_FD_CLOEXEC = 8; } } + +/// The `PerfEventIoc` enum is used to define the ioctl commands for perf events. +/// +/// See https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/perf_event.h#L544 #[repr(u32)] #[derive(Debug, Copy, Clone, FromPrimitive)] pub enum PerfEventIoc { + /// Equivalent to [crate::include::bindings::linux_bpf::AYA_PERF_EVENT_IOC_ENABLE]. Enable = 9216, + /// Equivalent to [crate::include::bindings::linux_bpf::AYA_PERF_EVENT_IOC_DISABLE]. Disable = 9217, + /// Equivalent to [crate::include::bindings::linux_bpf::AYA_PERF_EVENT_IOC_SET_BPF]. SetBpf = 1074013192, } diff --git a/user/apps/syscall_ebpf/.vim/coc-settings.json b/user/apps/syscall_ebpf/.vim/coc-settings.json deleted file mode 100644 index 0c82ac973..000000000 --- a/user/apps/syscall_ebpf/.vim/coc-settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "rust-analyzer.linkedProjects": ["Cargo.toml", "syscall_ebpf-ebpf/Cargo.toml"] -} diff --git a/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml index e1981510c..842697f84 100644 --- a/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml +++ b/user/apps/syscall_ebpf/syscall_ebpf-common/Cargo.toml @@ -8,7 +8,7 @@ default = [] user = ["aya"] [dependencies] -aya = { git = "https://github.com/os-module/tiny-aya.git", optional = true } +aya = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/tiny-aya.git", rev = "9632e57", optional = true } [lib] path = "src/lib.rs" diff --git a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml index 24ce39183..447b8f049 100644 --- a/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml +++ b/user/apps/syscall_ebpf/syscall_ebpf-ebpf/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "nightly" +channel = "nightly-2024-07-23" # The source code of rustc, provided by the rust-src component, is needed for # building eBPF programs. 
components = [ diff --git a/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml index df20ba02c..11ee997e2 100644 --- a/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml +++ b/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml @@ -5,8 +5,8 @@ edition = "2021" publish = false [dependencies] -aya = "0.12" -aya-log = "0.2" +aya = "0.12.0" +aya-log = "0.2.0" syscall_ebpf-common = { path = "../syscall_ebpf-common", features = ["user"] } anyhow = "1" env_logger = "0.10" diff --git a/user/apps/test_ebpf/Cargo.toml b/user/apps/test_ebpf/Cargo.toml index 7789b1616..f01ffcf27 100644 --- a/user/apps/test_ebpf/Cargo.toml +++ b/user/apps/test_ebpf/Cargo.toml @@ -4,8 +4,8 @@ version = "0.1.0" edition = "2021" [dependencies] -aya = { git = "https://github.com/os-module/tiny-aya.git" } -aya-log = { git = "https://github.com/os-module/tiny-aya.git" } +aya = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/tiny-aya.git", rev = "9632e57" } +aya-log = { git = "https://git.mirrors.dragonos.org.cn/DragonOS-Community/tiny-aya.git",rev = "9632e57" } log = "0.4.22" env_logger = "0.11.5" From 065148526ef3137a8ef3debfb55379ec30e8782f Mon Sep 17 00:00:00 2001 From: Godones Date: Tue, 15 Oct 2024 10:06:44 +0800 Subject: [PATCH 08/10] fix: release pagecache --- kernel/src/perf/bpf.rs | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/kernel/src/perf/bpf.rs b/kernel/src/perf/bpf.rs index 5f5ad7cb7..e2b94f8f2 100644 --- a/kernel/src/perf/bpf.rs +++ b/kernel/src/perf/bpf.rs @@ -9,7 +9,7 @@ use crate::include::bindings::linux_bpf::{ use crate::libs::spinlock::{SpinLock, SpinLockGuard}; use crate::mm::allocator::page_frame::{FrameAllocator, PageFrameCount, PhysPageFrame}; use crate::mm::page::{page_manager_lock_irqsave, Page, PageFlushAll}; -use crate::mm::MemoryManagementArch; +use crate::mm::{MemoryManagementArch, PhysAddr}; use crate::perf::util::{LostSamples, PerfProbeArgs, PerfSample, SampleHeader}; use alloc::string::String; use alloc::sync::Arc; @@ -38,6 +38,7 @@ pub struct RingPage { ptr: usize, data_region_size: usize, lost: usize, + phys_addr: PhysAddr, } impl RingPage { @@ -47,14 +48,15 @@ impl RingPage { size: 0, data_region_size: 0, lost: 0, + phys_addr: PhysAddr::new(0), } } - pub fn new_init(start: usize, len: usize) -> Self { - Self::init(start as _, len) + pub fn new_init(start: usize, len: usize, phys_addr: PhysAddr) -> Self { + Self::init(start as _, len, phys_addr) } - fn init(ptr: *mut u8, size: usize) -> Self { + fn init(ptr: *mut u8, size: usize, phys_addr: PhysAddr) -> Self { assert_eq!(size % PAGE_SIZE, 0); assert!(size / PAGE_SIZE >= 2); // The first page will be filled with perf_event_mmap_page @@ -73,6 +75,7 @@ impl RingPage { size, data_region_size: size - PAGE_SIZE, lost: 0, + phys_addr, } } @@ -242,7 +245,7 @@ impl BpfPerfEvent { } let virt_addr = unsafe { MMArch::phys_2_virt(phy_addr) }.unwrap(); // create mmap page - let mmap_page = RingPage::new_init(virt_addr.data(), len); + let mmap_page = RingPage::new_init(virt_addr.data(), len, phy_addr); data.mmap_page = mmap_page; data.offset = offset; Ok(()) @@ -255,6 +258,21 @@ impl BpfPerfEvent { } } +impl Drop for BpfPerfEvent { + fn drop(&mut self) { + let mut page_manager_guard = page_manager_lock_irqsave(); + let data = self.data.lock(); + let phy_addr = data.mmap_page.phys_addr; + let len = data.mmap_page.size; + let page_count = PageFrameCount::new(len / PAGE_SIZE); + let mut cur_phys = PhysPageFrame::new(phy_addr); + for i in 
0..page_count.data() { + page_manager_guard.remove_page(&cur_phys.phys_address()); + cur_phys = cur_phys.next(); + } + } +} + impl IndexNode for BpfPerfEvent { fn mmap(&self, start: usize, len: usize, offset: usize) -> Result<()> { self.do_mmap(start, len, offset) From 7a5b4731d5022fbb8e969c300843f88858c94261 Mon Sep 17 00:00:00 2001 From: Godones Date: Tue, 15 Oct 2024 11:14:10 +0800 Subject: [PATCH 09/10] fix: remove syscall_ebpf --- user/apps/syscall_ebpf/Cargo.toml | 2 +- .../apps/syscall_ebpf/syscall_ebpf/Cargo.toml | 19 -------- .../syscall_ebpf/syscall_ebpf/src/main.rs | 47 ------------------- 3 files changed, 1 insertion(+), 67 deletions(-) delete mode 100644 user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml delete mode 100644 user/apps/syscall_ebpf/syscall_ebpf/src/main.rs diff --git a/user/apps/syscall_ebpf/Cargo.toml b/user/apps/syscall_ebpf/Cargo.toml index af91116bd..6eb4e6322 100644 --- a/user/apps/syscall_ebpf/Cargo.toml +++ b/user/apps/syscall_ebpf/Cargo.toml @@ -1,3 +1,3 @@ [workspace] resolver = "2" -members = ["xtask", "syscall_ebpf", "syscall_ebpf-common"] +members = ["xtask", "syscall_ebpf-common"] diff --git a/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml b/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml deleted file mode 100644 index 11ee997e2..000000000 --- a/user/apps/syscall_ebpf/syscall_ebpf/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "syscall_ebpf" -version = "0.1.0" -edition = "2021" -publish = false - -[dependencies] -aya = "0.12.0" -aya-log = "0.2.0" -syscall_ebpf-common = { path = "../syscall_ebpf-common", features = ["user"] } -anyhow = "1" -env_logger = "0.10" -libc = "0.2" -log = "0.4" -tokio = { version = "1.25", features = ["macros", "rt", "rt-multi-thread", "net", "signal"] } - -[[bin]] -name = "syscall_ebpf" -path = "src/main.rs" diff --git a/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs b/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs deleted file mode 100644 index 2a3a274d5..000000000 --- a/user/apps/syscall_ebpf/syscall_ebpf/src/main.rs +++ /dev/null @@ -1,47 +0,0 @@ -use aya::programs::KProbe; -use aya::{include_bytes_aligned, Bpf}; -use aya_log::BpfLogger; -use log::{info, warn, debug}; -use tokio::signal; - -#[tokio::main] -async fn main() -> Result<(), anyhow::Error> { - env_logger::init(); - - // Bump the memlock rlimit. This is needed for older kernels that don't use the - // new memcg based accounting, see https://lwn.net/Articles/837122/ - let rlim = libc::rlimit { - rlim_cur: libc::RLIM_INFINITY, - rlim_max: libc::RLIM_INFINITY, - }; - let ret = unsafe { libc::setrlimit(libc::RLIMIT_MEMLOCK, &rlim) }; - if ret != 0 { - debug!("remove limit on locked memory failed, ret is: {}", ret); - } - - // This will include your eBPF object file as raw bytes at compile-time and load it at - // runtime. This approach is recommended for most real-world use cases. If you would - // like to specify the eBPF program at runtime rather than at compile-time, you can - // reach for `Bpf::load_file` instead. - #[cfg(debug_assertions)] - let mut bpf = Bpf::load(include_bytes_aligned!( - "../../target/bpfel-unknown-none/debug/syscall_ebpf" - ))?; - #[cfg(not(debug_assertions))] - let mut bpf = Bpf::load(include_bytes_aligned!( - "../../target/bpfel-unknown-none/release/syscall_ebpf" - ))?; - if let Err(e) = BpfLogger::init(&mut bpf) { - // This can happen if you remove all log statements from your eBPF program. 
- warn!("failed to initialize eBPF logger: {}", e); - } - let program: &mut KProbe = bpf.program_mut("syscall_ebpf").unwrap().try_into()?; - program.load()?; - program.attach("dragonos_kernel::syscall::Syscall::handle", 0)?; - - info!("Waiting for Ctrl-C..."); - signal::ctrl_c().await?; - info!("Exiting..."); - - Ok(()) -} From 2d5de955b81dc015ab286776093884be242e303c Mon Sep 17 00:00:00 2001 From: Godones Date: Wed, 16 Oct 2024 15:54:23 +0800 Subject: [PATCH 10/10] fix: remove spinlock in PageCache --- kernel/src/filesystem/vfs/file.rs | 21 ++++++++++++++++----- kernel/src/perf/mod.rs | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/kernel/src/filesystem/vfs/file.rs b/kernel/src/filesystem/vfs/file.rs index 52e05322b..f3afdd0e6 100644 --- a/kernel/src/filesystem/vfs/file.rs +++ b/kernel/src/filesystem/vfs/file.rs @@ -11,6 +11,7 @@ use xarray::XArray; use super::{Dirent, FileType, IndexNode, InodeId, Metadata, SpecialNodeData}; use crate::filesystem::eventfd::EventFdInode; +use crate::libs::lazy_init::Lazy; use crate::perf::PerfEventInode; use crate::{ arch::MMArch, @@ -126,7 +127,7 @@ impl FileMode { /// 页面缓存 pub struct PageCache { xarray: SpinLock>>, - inode: SpinLock>>, + inode: Lazy>, } impl core::fmt::Debug for PageCache { @@ -149,13 +150,19 @@ impl PageCache { pub fn new(inode: Option>) -> Arc { let page_cache = Self { xarray: SpinLock::new(XArray::new()), - inode: SpinLock::new(inode), + inode: { + let v: Lazy> = Lazy::new(); + if let Some(inode) = inode { + v.init(inode); + } + v + }, }; Arc::new(page_cache) } pub fn inode(&self) -> Option> { - self.inode.lock().clone() + self.inode.try_get().cloned() } pub fn add_page(&self, offset: usize, page: &Arc) { @@ -177,8 +184,12 @@ impl PageCache { cursor.remove(); } - pub fn set_inode(&self, inode: Weak) { - *self.inode.lock() = Some(inode) + pub fn set_inode(&self, inode: Weak) -> Result<(), SystemError> { + if self.inode.initialized() { + return Err(SystemError::EINVAL); + } + self.inode.init(inode); + Ok(()) } } diff --git a/kernel/src/perf/mod.rs b/kernel/src/perf/mod.rs index de18c30ea..c0070c132 100644 --- a/kernel/src/perf/mod.rs +++ b/kernel/src/perf/mod.rs @@ -303,7 +303,7 @@ pub fn perf_event_open( let page_cache = event.page_cache(); let perf_event = Arc::new(PerfEventInode::new(event)); if let Some(cache) = page_cache { - cache.set_inode(Arc::downgrade(&(perf_event.clone() as _))); + cache.set_inode(Arc::downgrade(&(perf_event.clone() as _)))?; } let file = File::new(perf_event, file_mode)?; let fd_table = ProcessManager::current_pcb().fd_table();