Commit 59929cd1 authored by Daniel T. Lee, committed by Daniel Borkmann

samples, bpf: Refactor kprobe, tail call kern progs map definition



Because the previous two commits replaced the bpf_load implementation of
the user programs with libbpf, the corresponding kernel programs' map
definitions can be replaced with the new BTF-defined map syntax.

This commit only updates the samples that use the libbpf API for loading
the bpf program, not bpf_load.
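
For reference, the conversion follows the pattern below. This is a
minimal sketch, not code from this series: the map name and the
key/value types are illustrative.

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  /* Legacy libbpf map definition (the style being removed): */
  struct bpf_map_def SEC("maps") example_map = {
  	.type = BPF_MAP_TYPE_HASH,
  	.key_size = sizeof(__u32),
  	.value_size = sizeof(__u64),
  	.max_entries = 1024,
  };

  /* Equivalent BTF-defined map; only one of the two definitions
   * would exist in a real object file.
   */
  struct {
  	__uint(type, BPF_MAP_TYPE_HASH);
  	__type(key, __u32);
  	__type(value, __u64);
  	__uint(max_entries, 1024);
  } example_map SEC(".maps");

Note that some maps below keep __uint(key_size, ...) and
__uint(value_size, ...) instead of __type(); map types such as
BPF_MAP_TYPE_PROG_ARRAY and BPF_MAP_TYPE_STACK_TRACE do not take BTF
key/value types, so the sizes are spelled out there.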

Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200516040608.1377876-6-danieltimlee@gmail.com
parent 14846dda
samples/bpf/sampleip_kern.c  +6 −6
@@ -13,12 +13,12 @@
 
 #define MAX_IPS		8192
 
-struct bpf_map_def SEC("maps") ip_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(u64),
-	.value_size = sizeof(u32),
-	.max_entries = MAX_IPS,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, u64);
+	__type(value, u32);
+	__uint(max_entries, MAX_IPS);
+} ip_map SEC(".maps");
 
 SEC("perf_event")
 int do_sample(struct bpf_perf_event_data *ctx)
samples/bpf/sockex3_kern.c  +18 −18
@@ -19,12 +19,12 @@
 
 #define PROG(F) SEC("socket/"__stringify(F)) int bpf_func_##F
 
-struct bpf_map_def SEC("maps") jmp_table = {
-	.type = BPF_MAP_TYPE_PROG_ARRAY,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(u32),
-	.max_entries = 8,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, sizeof(u32));
+	__uint(max_entries, 8);
+} jmp_table SEC(".maps");
 
 #define PARSE_VLAN 1
 #define PARSE_MPLS 2
@@ -92,12 +92,12 @@ struct globals {
 	struct flow_key_record flow;
 };
 
-struct bpf_map_def SEC("maps") percpu_map = {
-	.type = BPF_MAP_TYPE_ARRAY,
-	.key_size = sizeof(__u32),
-	.value_size = sizeof(struct globals),
-	.max_entries = 32,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__type(key, __u32);
+	__type(value, struct globals);
+	__uint(max_entries, 32);
+} percpu_map SEC(".maps");
 
 /* user poor man's per_cpu until native support is ready */
 static struct globals *this_cpu_globals(void)
@@ -113,12 +113,12 @@ struct pair {
 	__u64 bytes;
 };
 
-struct bpf_map_def SEC("maps") hash_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(struct flow_key_record),
-	.value_size = sizeof(struct pair),
-	.max_entries = 1024,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, struct flow_key_record);
+	__type(value, struct pair);
+	__uint(max_entries, 1024);
+} hash_map SEC(".maps");
 
 static void update_stats(struct __sk_buff *skb, struct globals *g)
 {
samples/bpf/trace_event_kern.c  +12 −12
@@ -18,19 +18,19 @@ struct key_t {
 	u32 userstack;
 };
 
-struct bpf_map_def SEC("maps") counts = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(struct key_t),
-	.value_size = sizeof(u64),
-	.max_entries = 10000,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, struct key_t);
+	__type(value, u64);
+	__uint(max_entries, 10000);
+} counts SEC(".maps");
 
-struct bpf_map_def SEC("maps") stackmap = {
-	.type = BPF_MAP_TYPE_STACK_TRACE,
-	.key_size = sizeof(u32),
-	.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
-	.max_entries = 10000,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+	__uint(max_entries, 10000);
+} stackmap SEC(".maps");
 
 #define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
 #define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
samples/bpf/tracex2_kern.c  +12 −12
@@ -12,12 +12,12 @@
 #include <bpf/bpf_tracing.h>
 #include "trace_common.h"
 
-struct bpf_map_def SEC("maps") my_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(long),
-	.value_size = sizeof(long),
-	.max_entries = 1024,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, long);
+	__type(value, long);
+	__uint(max_entries, 1024);
+} my_map SEC(".maps");
 
 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
  * example will no longer be meaningful
@@ -71,12 +71,12 @@ struct hist_key {
 	u64 index;
 };
 
-struct bpf_map_def SEC("maps") my_hist_map = {
-	.type = BPF_MAP_TYPE_PERCPU_HASH,
-	.key_size = sizeof(struct hist_key),
-	.value_size = sizeof(long),
-	.max_entries = 1024,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(key_size, sizeof(struct hist_key));
+	__uint(value_size, sizeof(long));
+	__uint(max_entries, 1024);
+} my_hist_map SEC(".maps");
 
 SEC("kprobe/" SYSCALL(sys_write))
 int bpf_prog3(struct pt_regs *ctx)
samples/bpf/tracex3_kern.c  +12 −12
@@ -11,12 +11,12 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct bpf_map_def SEC("maps") my_map = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(long),
-	.value_size = sizeof(u64),
-	.max_entries = 4096,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, long);
+	__type(value, u64);
+	__uint(max_entries, 4096);
+} my_map SEC(".maps");
 
 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
  * example will no longer be meaningful
@@ -42,12 +42,12 @@ static unsigned int log2l(unsigned long long n)
 
 #define SLOTS 100
 
-struct bpf_map_def SEC("maps") lat_map = {
-	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
-	.key_size = sizeof(u32),
-	.value_size = sizeof(u64),
-	.max_entries = SLOTS,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, sizeof(u64));
+	__uint(max_entries, SLOTS);
+} lat_map SEC(".maps");
 
 SEC("kprobe/blk_account_io_completion")
 int bpf_prog2(struct pt_regs *ctx)
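
The program-side access pattern is unchanged by the new definition
style; helpers still take a pointer to the map object. A minimal,
hypothetical lookup against the BTF-defined lat_map above (not part of
this commit; it assumes the includes already present in the sample)
might look like:

  SEC("kprobe/blk_account_io_done")
  int count_completions(struct pt_regs *ctx)
  {
  	u32 key = 0;
  	u64 *val = bpf_map_lookup_elem(&lat_map, &key);

  	/* PERCPU_ARRAY slots are preallocated, so the lookup only
  	 * fails for an out-of-range key.
  	 */
  	if (val)
  		__sync_fetch_and_add(val, 1);
  	return 0;
  }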