// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Inc. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"

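/*
 * CPU time counter kfuncs under test, declared as weak ksyms and resolved
 * against the running kernel at load time.
 */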
extern u64 bpf_cpu_time_counter_to_ns(u64 cycles) __weak __ksym;
extern u64 bpf_get_cpu_time_counter(void) __weak __ksym;

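/*
 * A call to bpf_get_cpu_time_counter() should show up as a kfunc
 * ("kernel-function") call in the verifier's translated (xlated) program.
 */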
SEC("syscall")
__arch_x86_64
__xlated("0: call kernel-function")
__naked int bpf_rdtsc(void)
{
	asm volatile(
	"call %[bpf_get_cpu_time_counter];"
	"exit"
	:
	: __imm(bpf_get_cpu_time_counter)
	: __clobber_all
	);
}

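/*
 * On x86-64 the JIT is expected to inline bpf_get_cpu_time_counter() into an
 * rdtsc sequence, so check the JITed code rather than the xlated program.
 */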
SEC("syscall")
__arch_x86_64
/* program entry for bpf_rdtsc_jit_x86_64(), regular function prologue */
__jited(" endbr64")
__jited(" nopl (%rax,%rax)")
__jited(" nopl (%rax)")
__jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
__jited(" endbr64")
/* save RDX in R11 as it will be overwritten */
__jited(" movq %rdx, %r11")
/* lfence may not be executed depending on cpu features */
__jited(" {{(lfence|)}}")
__jited(" rdtsc")
/* combine EDX:EAX into RAX */
__jited(" shlq ${{(32|0x20)}}, %rdx")
__jited(" orq %rdx, %rax")
/* restore RDX from R11 */
__jited(" movq %r11, %rdx")
__jited(" leave")
__naked int bpf_rdtsc_jit_x86_64(void)
{
	asm volatile(
	"call %[bpf_get_cpu_time_counter];"
	"exit"
	:
	: __imm(bpf_get_cpu_time_counter)
	: __clobber_all
	);
}

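/*
 * bpf_cpu_time_counter_to_ns() is likewise a kfunc call at the xlated level,
 * with the constant argument loaded into r1 first.
 */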
SEC("syscall")
__arch_x86_64
__xlated("0: r1 = 42")
__xlated("1: call kernel-function")
__naked int bpf_cyc2ns(void)
{
	asm volatile(
	"r1 = 0x2a;"
	"call %[bpf_cpu_time_counter_to_ns];"
	"exit"
	:
	: __imm(bpf_cpu_time_counter_to_ns)
	: __clobber_all
	);
}

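/*
 * On x86-64 bpf_cpu_time_counter_to_ns() is expected to be inlined into a
 * multiply/shift cycles-to-nanoseconds conversion, with the constant argument
 * materialized directly in RDI.
 */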
SEC("syscall")
__arch_x86_64
/* program entry for bpf_cyc2ns_jit_x86(), regular function prologue */
__jited(" endbr64")
__jited(" nopl (%rax,%rax)")
__jited(" nopl (%rax)")
__jited(" pushq %rbp")
__jited(" movq %rsp, %rbp")
__jited(" endbr64")
/* load the constant argument into RDI */
__jited(" movabsq $0x2a2a2a2a2a, %rdi")
/* convert cycles to ns with a multiply and shift */
__jited(" imulq ${{.*}}, %rdi, %rax")
__jited(" shrq ${{.*}}, %rax")
__jited(" leave")
__naked int bpf_cyc2ns_jit_x86(void)
{
	asm volatile(
	"r1 = 0x2a2a2a2a2a ll;"
	"call %[bpf_cpu_time_counter_to_ns];"
	"exit"
	:
	: __imm(bpf_cpu_time_counter_to_ns)
	: __clobber_all
	);
}

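/*
 * Call both kfuncs from plain C as well, so they are also exercised through
 * the regular (non-inline-asm) call path.
 */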
void rdtsc(void)
{
	bpf_get_cpu_time_counter();
	bpf_cpu_time_counter_to_ns(42);
}

char _license[] SEC("license") = "GPL";