author | Alexei Starovoitov <ast@plumgrid.com> | 2015-05-06 11:27:10 -0700
committer | Alexei Starovoitov <ast@plumgrid.com> | 2015-05-09 10:29:30 -0700
commit | bfd60c3135c8f010a6497dfc5e7d3070e26ca4d1 (patch)
tree | 67622743e984f7f56922d6262369ec3094c24906
parent | 5e866da65f313511f0ff36461429d835e8ce9760 (diff)
download | bpf-tail_call_v2.tar.gz
bpf: allow bpf programs to tail-call other bpf programs (tail_call_v2)
Introduce the bpf_tail_call(ctx, &jmp_table, index) helper function,
which can be used from BPF programs like:
int bpf_prog_foo(struct pt_regs *ctx)
{
        ...
        bpf_tail_call(ctx, &jmp_table, index);
        ...
}
that is roughly equivalent to:
int bpf_prog_foo(struct pt_regs *ctx)
{
        ...
        return (*jmp_table[index])(ctx);
        ...
}
The important detail is that this is not a normal call, but a tail call.
The kernel stack is precious, so this helper unwinds the current
stack frame and jumps into another BPF program without using
any extra stack.
bpf_tail_call() arguments:
ctx - context pointer
jmp_table - a map of type BPF_MAP_TYPE_PROG_ARRAY used as the jump table
index - index into the jump table
Since all BPF programs are identified by file descriptors, user space
needs to populate the jmp_table with FDs of other BPF programs.
If jmp_table[index] is empty, bpf_tail_call() doesn't jump anywhere
and program execution continues as normal.
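As a purely illustrative sketch (not part of this patch), populating such a
jump table from user space could look as follows; it assumes the samples/bpf
bpf_update_elem() wrapper from libbpf.h, that jmp_table_fd is the FD of the
BPF_MAP_TYPE_PROG_ARRAY map the programs reference, and that sys_write_fd and
sys_read_fd are hypothetical FDs of already-loaded BPF programs:

/* Illustrative sketch only: assumes the samples/bpf wrapper
 * bpf_update_elem() and hypothetical, already-loaded program FDs.
 */
#include <linux/unistd.h>
#include <linux/bpf.h>
#include "libbpf.h"

static int populate_jmp_table(int jmp_table_fd, int sys_write_fd,
                              int sys_read_fd)
{
        int key;

        /* each prog_array element holds a 4-byte program FD */
        key = __NR_write;
        if (bpf_update_elem(jmp_table_fd, &key, &sys_write_fd, BPF_ANY) < 0)
                return -1;

        key = __NR_read;
        if (bpf_update_elem(jmp_table_fd, &key, &sys_read_fd, BPF_ANY) < 0)
                return -1;

        return 0;
}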
Use cases:
- simplify complex programs by splitting them into a sequence of small programs
- dispatch routine
For tracing/seccomp the program may be triggered on all system calls, but
processing of syscall arguments will be different. It's more efficient to
implement them as:
int syscall_entry(struct seccomp_data *ctx)
{
        bpf_tail_call(ctx, &syscall_jmp_table, ctx->nr /* syscall number */);
        ... default: process unknown syscall ...
}
int sys_write_event(struct seccomp_data *ctx) {...}
int sys_read_event(struct seccomp_data *ctx) {...}
syscall_jmp_table[__NR_write] = sys_write_event;
syscall_jmp_table[__NR_read] = sys_read_event;
For networking the program may call into different parsers depending on
packet format, like:
int packet_parser(struct __sk_buff *skb)
{
        ... parse L2, L3 here ...
        __u8 ipproto = load_byte(skb, ... offsetof(struct iphdr, protocol));
        bpf_tail_call(skb, &ipproto_jmp_table, ipproto);
        ... default: process unknown protocol ...
}
int parse_tcp(struct __sk_buff *skb) {...}
int parse_udp(struct __sk_buff *skb) {...}
ipproto_jmp_table[IPPROTO_TCP] = parse_tcp;
ipproto_jmp_table[IPPROTO_UDP] = parse_udp;
- for the TC use case, bpf_tail_call() makes it possible to implement
reclassify-like logic where one BPF program can dynamically jump into another
- bpf_map_update_elem()/bpf_map_delete_elem() calls on a BPF_MAP_TYPE_PROG_ARRAY
jump table are atomic, so user space can rebuild chains of BPF programs on the fly
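To illustrate the last point, a hedged user-space sketch of rebuilding a chain
at runtime; prog_array_fd and new_parse_tcp_fd are hypothetical FDs, and the
helpers are again the samples/bpf libbpf.h wrappers around the bpf(2) syscall:

/* Illustrative sketch only: prog_array_fd and new_parse_tcp_fd are
 * hypothetical; bpf_update_elem()/bpf_delete_elem() are the samples/bpf
 * wrappers.
 */
#include <netinet/in.h>         /* IPPROTO_TCP, IPPROTO_UDP */
#include <linux/bpf.h>
#include "libbpf.h"

static int swap_tcp_parser(int prog_array_fd, int new_parse_tcp_fd)
{
        int key = IPPROTO_TCP;

        /* atomically replaces the entry; concurrent bpf_tail_call()s see
         * either the old or the new program, never a torn value
         */
        return bpf_update_elem(prog_array_fd, &key, &new_parse_tcp_fd, BPF_ANY);
}

static int drop_udp_parser(int prog_array_fd)
{
        int key = IPPROTO_UDP;

        /* once deleted, bpf_tail_call() for UDP falls through to the
         * caller's default path
         */
        return bpf_delete_elem(prog_array_fd, &key);
}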
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
-rw-r--r-- | arch/x86/net/bpf_jit_comp.c | 38
-rw-r--r-- | include/linux/bpf.h | 9
-rw-r--r-- | include/linux/filter.h | 10
-rw-r--r-- | include/uapi/linux/bpf.h | 10
-rw-r--r-- | kernel/bpf/arraymap.c | 108
-rw-r--r-- | kernel/bpf/core.c | 36
-rw-r--r-- | kernel/bpf/syscall.c | 3
-rw-r--r-- | kernel/bpf/verifier.c | 8
-rw-r--r-- | kernel/trace/bpf_trace.c | 4
-rw-r--r-- | net/sched/cls_bpf.c | 2
-rw-r--r-- | samples/bpf/Makefile | 4
-rw-r--r-- | samples/bpf/bpf_helpers.h | 2
-rw-r--r-- | samples/bpf/bpf_load.c | 46
-rw-r--r-- | samples/bpf/tracex5_kern.c | 57
-rw-r--r-- | samples/bpf/tracex5_user.c | 25
15 files changed, 342 insertions(+), 20 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 987514396c1e44..03325eb2118b00 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -691,6 +691,12 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: func = (u8 *) __bpf_call_base + imm32; jmp_offset = func - (image + addrs[i]); + if (func == (void *) bpf_tail_call) { + EMIT1(0x57); /* push %rdi */ + + /* cost of register restore and jump */ + jmp_offset += 1 + 4 + 2 + 7 * 4 + 1 + 4 + 4 + 2; + } if (seen_ld_abs) { EMIT2(0x41, 0x52); /* push %r10 */ EMIT2(0x41, 0x51); /* push %r9 */ @@ -709,6 +715,38 @@ xadd: if (is_imm8(insn->off)) EMIT2(0x41, 0x59); /* pop %r9 */ EMIT2(0x41, 0x5A); /* pop %r10 */ } + if (func == (void *) bpf_tail_call) { + EMIT1(0x5F); /* pop %rdi */ + + /* cmp rax, 0 */ + EMIT4(0x48, 0x83, 0xF8, 0x00); + + /* if bpf_prog == NULL, skip over jump */ + EMIT2(X86_JE, 7 * 4 + 1 + 4 + 4 + 2); + + /* restore rbx, r13, r14, r15 and frame */ + EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize); + EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8); + EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16); + EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24); + EMIT1(0xC9); /* leave */ + + /* need to populate rsi, if we're jumping from + * JITed program into non-JITed + */ + /* lea rsi, [rax + 40] */ + EMIT4(0x48, 0x8D, 0x70, offsetof(struct bpf_prog, insnsi)); + + /* mov rax, qword ptr [rax + 32] */ + EMIT4(0x48, 0x8B, 0x40, offsetof(struct bpf_prog, bpf_func)); + + /* now we're ready to jump into next BPF program + * rdi == ctx (1st arg) + * rsi == prog->insnsi (2nd arg) + * rax == prog->bpf_func + */ + EMIT2(0xFF, 0xE0); /* jmp rax */ + } break; /* cond jump */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d5cda067115aaa..578aa6035845e2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -126,6 +126,14 @@ struct bpf_prog_aux { struct work_struct work; }; +struct bpf_array { + struct bpf_map map; + u32 elem_size; + char value[0] __aligned(8); +}; + +void bpf_prog_array_map_clear(struct bpf_map *map); + #ifdef CONFIG_BPF_SYSCALL void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); @@ -160,5 +168,6 @@ extern const struct bpf_func_proto bpf_map_delete_elem_proto; extern const struct bpf_func_proto bpf_get_prandom_u32_proto; extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; +extern const struct bpf_func_proto bpf_tail_call_proto; #endif /* _LINUX_BPF_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index fa11b3a367be54..c42ad2451aec99 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -330,6 +330,16 @@ struct sk_filter { #define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); + +DECLARE_PER_CPU(u32, bpf_tail_call_cnt); + +static __always_inline u32 bpf_prog_run(struct bpf_prog *prog, void *ctx) +{ + __this_cpu_write(bpf_tail_call_cnt, 0); + return (*prog->bpf_func)(ctx, prog->insnsi); +} + static inline unsigned int bpf_prog_size(unsigned int proglen) { return max(sizeof(struct bpf_prog), diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a9ebdf5701e8dd..f0a9af8b4dae11 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -113,6 +113,7 @@ enum bpf_map_type { BPF_MAP_TYPE_UNSPEC, BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_ARRAY, + BPF_MAP_TYPE_PROG_ARRAY, }; enum bpf_prog_type { @@ -210,6 +211,15 @@ enum bpf_func_id { * Return: 0 on success */ 
BPF_FUNC_l4_csum_replace, + + /** + * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program + * @ctx: context pointer passed to next program + * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY + * @index: index inside array that selects specific program to run + * Return: 0 on success + */ + BPF_FUNC_tail_call, __BPF_FUNC_MAX_ID, }; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 8a6616583f38ad..3024db36fa8811 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -15,12 +15,6 @@ #include <linux/slab.h> #include <linux/mm.h> -struct bpf_array { - struct bpf_map map; - u32 elem_size; - char value[0] __aligned(8); -}; - /* Called from syscall */ static struct bpf_map *array_map_alloc(union bpf_attr *attr) { @@ -154,3 +148,105 @@ static int __init register_array_map(void) return 0; } late_initcall(register_array_map); + +static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr) +{ + if (attr->value_size != 4) + return ERR_PTR(-EINVAL); + return array_map_alloc(attr); +} + +static void prog_array_map_free(struct bpf_map *map) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog **progs = (struct bpf_prog **) array->value; + int i; + + synchronize_rcu(); + + /* make sure it's empty */ + for (i = 0; i < array->map.max_entries; i++) + BUG_ON(progs[i] != NULL); + kvfree(array); +} + +static void *prog_array_map_lookup_elem(struct bpf_map *map, void *key) +{ + return NULL; +} + +/* only called from syscall */ +static int prog_array_map_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog **progs = (struct bpf_prog **) array->value; + struct bpf_prog *prog, *old_prog; + u32 index = *(u32 *)key, ufd; + + if (map_flags != BPF_ANY) + return -EINVAL; + + if (index >= array->map.max_entries) + return -E2BIG; + + ufd = *(u32 *)value; + prog = bpf_prog_get(ufd); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + old_prog = xchg(progs + index, prog); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static int prog_array_map_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog **progs = (struct bpf_prog **) array->value, *old_prog; + u32 index = *(u32 *)key; + + if (index >= array->map.max_entries) + return -E2BIG; + + old_prog = xchg(progs + index, NULL); + if (old_prog) { + bpf_prog_put(old_prog); + return 0; + } else { + return -ENOENT; + } +} + +/* decrement refcnt of all bpf_progs that are stored in this map */ +void bpf_prog_array_map_clear(struct bpf_map *map) +{ + struct bpf_array *array = container_of(map, struct bpf_array, map); + int i; + + for (i = 0; i < array->map.max_entries; i++) + prog_array_map_delete_elem(map, &i); +} + +static const struct bpf_map_ops prog_array_ops = { + .map_alloc = prog_array_map_alloc, + .map_free = prog_array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = prog_array_map_lookup_elem, + .map_update_elem = prog_array_map_update_elem, + .map_delete_elem = prog_array_map_delete_elem, +}; + +static struct bpf_map_type_list prog_array_type __read_mostly = { + .ops = &prog_array_ops, + .type = BPF_MAP_TYPE_PROG_ARRAY, +}; + +static int __init register_prog_array_map(void) +{ + bpf_register_map_type(&prog_array_type); + return 0; +} +late_initcall(register_prog_array_map); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 
54f0e7fcd0e288..e0c0499382f97f 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -176,6 +176,37 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) return 0; } +#define MAX_TAIL_CALL_CNT 32 +DEFINE_PER_CPU(u32, bpf_tail_call_cnt); +EXPORT_PER_CPU_SYMBOL(bpf_tail_call_cnt); + +u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5) +{ + struct bpf_map *map = (struct bpf_map *) (unsigned long) r2; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog **progs = (struct bpf_prog **) array->value; + struct bpf_prog *prog; + + if (index >= array->map.max_entries) + return 0; + + if (__this_cpu_inc_return(bpf_tail_call_cnt) > MAX_TAIL_CALL_CNT) + return 0; + + prog = READ_ONCE(progs[index]); + + return (u64) (long) prog; +} + +const struct bpf_func_proto bpf_tail_call_proto = { + .func = bpf_tail_call, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + /** * __bpf_prog_run - run eBPF program on a given context * @ctx: is the data we are operating on @@ -429,6 +460,11 @@ select_insn: */ BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5); + if (insn->imm == bpf_tail_call - __bpf_call_base && BPF_R0) { + ARG1 = BPF_R1; + insn = ((struct bpf_prog *) (long) BPF_R0)->insnsi; + goto select_insn; + } CONT; /* JMP */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 3bae6c59191483..7079772cc03eea 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -68,6 +68,9 @@ static int bpf_map_release(struct inode *inode, struct file *filp) { struct bpf_map *map = filp->private_data; + if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) + bpf_prog_array_map_clear(map); + bpf_map_put(map); return 0; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 47dcd3aa6e236e..57bd255254a22b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -907,6 +907,14 @@ static int check_call(struct verifier_env *env, int func_id) fn->ret_type, func_id); return -EINVAL; } + + if (map && map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { + /* prog_array map type needs extra care: + * bpf_map_update_elem() call is only allowed via syscall + */ + if (func_id == BPF_FUNC_map_update_elem) + return -EINVAL; + } return 0; } diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2d56ce50163264..dd7a8da782cb0d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -50,7 +50,7 @@ unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx) } rcu_read_lock(); - ret = BPF_PROG_RUN(prog, ctx); + ret = bpf_prog_run(prog, ctx); rcu_read_unlock(); out: @@ -172,6 +172,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func return &bpf_probe_read_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; + case BPF_FUNC_tail_call: + return &bpf_tail_call_proto; case BPF_FUNC_trace_printk: /* diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 91bd9c19471d58..8019c4c1e3eeda 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -72,7 +72,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* Needed here for accessing maps. 
*/ rcu_read_lock(); list_for_each_entry_rcu(prog, &head->plist, link) { - int filter_res = BPF_PROG_RUN(prog->filter, skb); + int filter_res = bpf_prog_run(prog->filter, skb); if (filter_res == 0) continue; diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 76e3458a5419a5..c683a51672fb8c 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -10,6 +10,7 @@ hostprogs-y += tracex1 hostprogs-y += tracex2 hostprogs-y += tracex3 hostprogs-y += tracex4 +hostprogs-y += tracex5 test_verifier-objs := test_verifier.o libbpf.o test_maps-objs := test_maps.o libbpf.o @@ -20,6 +21,7 @@ tracex1-objs := bpf_load.o libbpf.o tracex1_user.o tracex2-objs := bpf_load.o libbpf.o tracex2_user.o tracex3-objs := bpf_load.o libbpf.o tracex3_user.o tracex4-objs := bpf_load.o libbpf.o tracex4_user.o +tracex5-objs := bpf_load.o libbpf.o tracex5_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -29,6 +31,7 @@ always += tracex1_kern.o always += tracex2_kern.o always += tracex3_kern.o always += tracex4_kern.o +always += tracex5_kern.o always += tcbpf1_kern.o HOSTCFLAGS += -I$(objtree)/usr/include @@ -40,6 +43,7 @@ HOSTLOADLIBES_tracex1 += -lelf HOSTLOADLIBES_tracex2 += -lelf HOSTLOADLIBES_tracex3 += -lelf HOSTLOADLIBES_tracex4 += -lelf -lrt +HOSTLOADLIBES_tracex5 += -lelf # point this to your LLVM backend with bpf support LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index f960b5fb3ed842..02d70db6c32f87 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -21,6 +21,8 @@ static unsigned long long (*bpf_ktime_get_ns)(void) = (void *) BPF_FUNC_ktime_get_ns; static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) = (void *) BPF_FUNC_trace_printk; +static int (*bpf_tail_call)(void *ctx, void *map, int index) = + (void *) BPF_FUNC_tail_call; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 38dac5a53b518b..c16505d4cc86ec 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -16,6 +16,7 @@ #include <sys/ioctl.h> #include <sys/mman.h> #include <poll.h> +#include <ctype.h> #include "libbpf.h" #include "bpf_helpers.h" #include "bpf_load.h" @@ -29,6 +30,7 @@ int map_fd[MAX_MAPS]; int prog_fd[MAX_PROGS]; int event_fd[MAX_PROGS]; int prog_cnt; +int prog_array_fd = -1; static int load_and_attach(const char *event, struct bpf_insn *prog, int size) { @@ -54,12 +56,40 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) return -1; } + fd = bpf_prog_load(prog_type, prog, size, license, kern_version); + + if (fd < 0) { + printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf); + return -1; + } + + prog_fd[prog_cnt++] = fd; + + if (is_socket) + return 0; + if (is_kprobe || is_kretprobe) { if (is_kprobe) event += 7; else event += 10; + if (*event == 0) { + printf("event name cannot be empty\n"); + return -1; + } + + if (isdigit(*event)) { + int ind = atoi(event); + + err = bpf_update_elem(prog_array_fd, &ind, &fd, BPF_ANY); + if (err < 0) { + printf("failed to store prog_fd in prog_array\n"); + return -1; + } + return 0; + } + snprintf(buf, sizeof(buf), "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events", is_kprobe ? 
'p' : 'r', event, event); @@ -71,18 +101,6 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) } } - fd = bpf_prog_load(prog_type, prog, size, license, kern_version); - - if (fd < 0) { - printf("bpf_prog_load() err=%d\n%s", errno, bpf_log_buf); - return -1; - } - - prog_fd[prog_cnt++] = fd; - - if (is_socket) - return 0; - strcpy(buf, DEBUGFS); strcat(buf, "events/kprobes/"); strcat(buf, event); @@ -130,6 +148,9 @@ static int load_maps(struct bpf_map_def *maps, int len) maps[i].max_entries); if (map_fd[i] < 0) return 1; + + if (maps[i].type == BPF_MAP_TYPE_PROG_ARRAY) + prog_array_fd = map_fd[i]; } return 0; } @@ -288,6 +309,7 @@ int load_bpf_file(char *path) } close(fd); + return 0; } diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c new file mode 100644 index 00000000000000..33eec371e04ccc --- /dev/null +++ b/samples/bpf/tracex5_kern.c @@ -0,0 +1,57 @@ +/* Copyright (c) 2015 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include <linux/ptrace.h> +#include <linux/version.h> +#include <uapi/linux/bpf.h> +#include "bpf_helpers.h" + +struct pair { + u64 val; + u64 ip; +}; + +struct bpf_map_def SEC("maps") progs = { + .type = BPF_MAP_TYPE_PROG_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = 4, +}; + +SEC("kprobe/sys_write") +int bpf_prog1(struct pt_regs *ctx) +{ + if (ctx->dx == 512) + bpf_tail_call(ctx, &progs, 0); + return 0; +} + +SEC("kprobe/sys_read") +int bpf_prog2(struct pt_regs *ctx) +{ + if (ctx->dx == 1024) + bpf_tail_call(ctx, &progs, 1); + return 0; +} + +SEC("kprobe/0") +int bpf_prog3(struct pt_regs *ctx) +{ + char fmt[] = "write512\n"; + bpf_trace_printk(fmt, sizeof(fmt)); + return 0; +} + +SEC("kprobe/1") +int bpf_prog4(struct pt_regs *ctx) +{ + char fmt[] = "read1024\n"; + bpf_trace_printk(fmt, sizeof(fmt)); + return 0; +} + +char _license[] SEC("license") = "GPL"; +u32 _version SEC("version") = LINUX_VERSION_CODE; diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c new file mode 100644 index 00000000000000..eaf0d90fc2b36d --- /dev/null +++ b/samples/bpf/tracex5_user.c @@ -0,0 +1,25 @@ +#include <stdio.h> +#include <linux/bpf.h> +#include <unistd.h> +#include "libbpf.h" +#include "bpf_load.h" + +int main(int ac, char **argv) +{ + FILE *f; + char filename[256]; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + + f = popen("dd if=/dev/zero of=/dev/null count=5", "r"); + (void) f; + + read_trace_pipe(); + + return 0; +} |