2019-08-15 22:45:43 -07:00
|
|
|
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
|
selftests/bpf: add bpf-gcc support
Now that binutils and gcc support for BPF is upstream, make use of it in
BPF selftests using alu32-like approach. Share as much as possible of
CFLAGS calculation with clang.
Fixes only obvious issues, leaving more complex ones for later:
- Use gcc-provided bpf-helpers.h instead of manually defining the
helpers, change bpf_helpers.h include guard to avoid conflict.
- Include <linux/stddef.h> for __always_inline.
- Add $(OUTPUT)/../usr/include to include path in order to use local
kernel headers instead of system kernel headers when building with O=.
In order to activate the bpf-gcc support, one needs to configure
binutils and gcc with --target=bpf and make them available in $PATH. In
particular, gcc must be installed as `bpf-gcc`, which is the default.
Right now with binutils 25a2915e8dba and gcc r275589 only a handful of
tests work:
# ./test_progs_bpf_gcc
# Summary: 7/39 PASSED, 1 SKIPPED, 98 FAILED
The reasons for those failures are as follows:
- Build errors:
- `error: too many function arguments for eBPF` for __always_inline
functions read_str_var and read_map_var - must be inlining issue,
and for process_l3_headers_v6, which relies on optimizing away
function arguments.
- `error: indirect call in function, which are not supported by eBPF`
where there are no obvious indirect calls in the source code, e.g.
in __encap_ipip_none.
- `error: field 'lock' has incomplete type` for fields of `struct
bpf_spin_lock` type - bpf_spin_lock is redefined by bpf-helpers.h,
so its usage is sensitive to order of #includes.
- `error: eBPF stack limit exceeded` in sysctl_tcp_mem.
- Load errors:
- Missing object files due to above build errors.
- `libbpf: failed to create map (name: 'test_ver.bss')`.
- `libbpf: object file doesn't contain bpf program`.
- `libbpf: Program '.text' contains unrecognized relo data pointing to
section 0`.
- `libbpf: BTF is required, but is missing or corrupted` - no BTF
support in gcc yet.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Jose E. Marchesi <jose.marchesi@oracle.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2019-09-12 18:05:43 +02:00
|
|
|
#ifndef __BPF_HELPERS__
|
|
|
|
#define __BPF_HELPERS__
|
2014-12-01 15:06:37 -08:00
|
|
|
|
2019-07-05 08:50:10 -07:00
|
|
|
#define __uint(name, val) int (*name)[val]
|
|
|
|
#define __type(name, val) val *name
|
|
|
|
|
2019-05-23 14:53:54 +02:00
|
|
|
/* helper macro to print out debug messages */
|
|
|
|
#define bpf_printk(fmt, ...) \
|
|
|
|
({ \
|
|
|
|
char ____fmt[] = fmt; \
|
|
|
|
bpf_trace_printk(____fmt, sizeof(____fmt), \
|
|
|
|
##__VA_ARGS__); \
|
|
|
|
})
|
|
|
|
|
selftests/bpf: add bpf-gcc support
Now that binutils and gcc support for BPF is upstream, make use of it in
BPF selftests using alu32-like approach. Share as much as possible of
CFLAGS calculation with clang.
Fixes only obvious issues, leaving more complex ones for later:
- Use gcc-provided bpf-helpers.h instead of manually defining the
helpers, change bpf_helpers.h include guard to avoid conflict.
- Include <linux/stddef.h> for __always_inline.
- Add $(OUTPUT)/../usr/include to include path in order to use local
kernel headers instead of system kernel headers when building with O=.
In order to activate the bpf-gcc support, one needs to configure
binutils and gcc with --target=bpf and make them available in $PATH. In
particular, gcc must be installed as `bpf-gcc`, which is the default.
Right now with binutils 25a2915e8dba and gcc r275589 only a handful of
tests work:
# ./test_progs_bpf_gcc
# Summary: 7/39 PASSED, 1 SKIPPED, 98 FAILED
The reasons for those failures are as follows:
- Build errors:
- `error: too many function arguments for eBPF` for __always_inline
functions read_str_var and read_map_var - must be inlining issue,
and for process_l3_headers_v6, which relies on optimizing away
function arguments.
- `error: indirect call in function, which are not supported by eBPF`
where there are no obvious indirect calls in the source code, e.g.
in __encap_ipip_none.
- `error: field 'lock' has incomplete type` for fields of `struct
bpf_spin_lock` type - bpf_spin_lock is redefined by bpf-helpers.h,
so its usage is sensitive to order of #includes.
- `error: eBPF stack limit exceeded` in sysctl_tcp_mem.
- Load errors:
- Missing object files due to above build errors.
- `libbpf: failed to create map (name: 'test_ver.bss')`.
- `libbpf: object file doesn't contain bpf program`.
- `libbpf: Program '.text' contains unrecognized relo data pointing to
section 0`.
- `libbpf: BTF is required, but is missing or corrupted` - no BTF
support in gcc yet.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Jose E. Marchesi <jose.marchesi@oracle.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2019-09-12 18:05:43 +02:00
|
|
|
#ifdef __clang__
|
|
|
|
|
|
|
|
/* helper macro to place programs, maps, license in
|
|
|
|
* different sections in elf_bpf file. Section names
|
|
|
|
* are interpreted by elf_bpf loader
|
|
|
|
*/
|
|
|
|
#define SEC(NAME) __attribute__((section(NAME), used))
|
|
|
|
|
2014-12-01 15:06:37 -08:00
|
|
|
/* helper functions called from eBPF programs written in C */
|
2019-04-09 23:20:17 +02:00
|
|
|
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
|
2014-12-01 15:06:37 -08:00
|
|
|
(void *) BPF_FUNC_map_lookup_elem;
|
2019-04-09 23:20:17 +02:00
|
|
|
static int (*bpf_map_update_elem)(void *map, const void *key, const void *value,
|
2014-12-01 15:06:37 -08:00
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_map_update_elem;
|
2019-04-09 23:20:17 +02:00
|
|
|
static int (*bpf_map_delete_elem)(void *map, const void *key) =
|
2014-12-01 15:06:37 -08:00
|
|
|
(void *) BPF_FUNC_map_delete_elem;
|
2019-04-09 23:20:17 +02:00
|
|
|
static int (*bpf_map_push_elem)(void *map, const void *value,
|
2018-10-18 15:16:41 +02:00
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_map_push_elem;
|
|
|
|
static int (*bpf_map_pop_elem)(void *map, void *value) =
|
|
|
|
(void *) BPF_FUNC_map_pop_elem;
|
|
|
|
static int (*bpf_map_peek_elem)(void *map, void *value) =
|
|
|
|
(void *) BPF_FUNC_map_peek_elem;
|
2019-06-10 10:46:55 -07:00
|
|
|
static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
|
2015-03-25 12:49:23 -07:00
|
|
|
(void *) BPF_FUNC_probe_read;
|
|
|
|
static unsigned long long (*bpf_ktime_get_ns)(void) =
|
|
|
|
(void *) BPF_FUNC_ktime_get_ns;
|
|
|
|
static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
|
|
|
|
(void *) BPF_FUNC_trace_printk;
|
samples/bpf: bpf_tail_call example for tracing
kprobe example that demonstrates how future seccomp programs may look like.
It attaches to seccomp_phase1() function and tail-calls other BPF programs
depending on syscall number.
Existing optimized classic BPF seccomp programs generated by Chrome look like:
if (sd.nr < 121) {
if (sd.nr < 57) {
if (sd.nr < 22) {
if (sd.nr < 7) {
if (sd.nr < 4) {
if (sd.nr < 1) {
check sys_read
} else {
if (sd.nr < 3) {
check sys_write and sys_open
} else {
check sys_close
}
}
} else {
} else {
} else {
} else {
} else {
}
the future seccomp using native eBPF may look like:
bpf_tail_call(&sd, &syscall_jmp_table, sd.nr);
which is simpler, faster and leaves more room for per-syscall checks.
Usage:
$ sudo ./tracex5
<...>-366 [001] d... 4.870033: : read(fd=1, buf=00007f6d5bebf000, size=771)
<...>-369 [003] d... 4.870066: : mmap
<...>-369 [003] d... 4.870077: : syscall=110 (one of get/set uid/pid/gid)
<...>-369 [003] d... 4.870089: : syscall=107 (one of get/set uid/pid/gid)
sh-369 [000] d... 4.891740: : read(fd=0, buf=00000000023d1000, size=512)
sh-369 [000] d... 4.891747: : write(fd=1, buf=00000000023d3000, size=512)
sh-369 [000] d... 4.891747: : read(fd=1, buf=00000000023d3000, size=512)
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2015-05-19 16:59:05 -07:00
|
|
|
static void (*bpf_tail_call)(void *ctx, void *map, int index) =
|
|
|
|
(void *) BPF_FUNC_tail_call;
|
2015-05-19 16:59:06 -07:00
|
|
|
static unsigned long long (*bpf_get_smp_processor_id)(void) =
|
|
|
|
(void *) BPF_FUNC_get_smp_processor_id;
|
2015-06-12 19:39:12 -07:00
|
|
|
static unsigned long long (*bpf_get_current_pid_tgid)(void) =
|
|
|
|
(void *) BPF_FUNC_get_current_pid_tgid;
|
|
|
|
static unsigned long long (*bpf_get_current_uid_gid)(void) =
|
|
|
|
(void *) BPF_FUNC_get_current_uid_gid;
|
|
|
|
static int (*bpf_get_current_comm)(void *buf, int buf_size) =
|
|
|
|
(void *) BPF_FUNC_get_current_comm;
|
2017-06-02 21:03:53 -07:00
|
|
|
static unsigned long long (*bpf_perf_event_read)(void *map,
|
|
|
|
unsigned long long flags) =
|
2015-08-06 07:02:36 +00:00
|
|
|
(void *) BPF_FUNC_perf_event_read;
|
2015-09-15 23:05:43 -07:00
|
|
|
static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) =
|
|
|
|
(void *) BPF_FUNC_clone_redirect;
|
|
|
|
static int (*bpf_redirect)(int ifindex, int flags) =
|
|
|
|
(void *) BPF_FUNC_redirect;
|
2017-07-17 09:30:25 -07:00
|
|
|
static int (*bpf_redirect_map)(void *map, int key, int flags) =
|
|
|
|
(void *) BPF_FUNC_redirect_map;
|
2016-08-10 09:45:39 -07:00
|
|
|
static int (*bpf_perf_event_output)(void *ctx, void *map,
|
|
|
|
unsigned long long flags, void *data,
|
|
|
|
int size) =
|
2015-10-20 20:02:35 -07:00
|
|
|
(void *) BPF_FUNC_perf_event_output;
|
2016-02-17 19:58:59 -08:00
|
|
|
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
|
|
|
|
(void *) BPF_FUNC_get_stackid;
|
2019-06-10 10:46:55 -07:00
|
|
|
static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
|
2016-07-25 05:54:46 -07:00
|
|
|
(void *) BPF_FUNC_probe_write_user;
|
2016-08-12 08:57:04 -07:00
|
|
|
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
|
|
|
|
(void *) BPF_FUNC_current_task_under_cgroup;
|
2016-08-19 11:55:44 -07:00
|
|
|
static int (*bpf_skb_get_tunnel_key)(void *ctx, void *key, int size, int flags) =
|
|
|
|
(void *) BPF_FUNC_skb_get_tunnel_key;
|
|
|
|
static int (*bpf_skb_set_tunnel_key)(void *ctx, void *key, int size, int flags) =
|
|
|
|
(void *) BPF_FUNC_skb_set_tunnel_key;
|
|
|
|
static int (*bpf_skb_get_tunnel_opt)(void *ctx, void *md, int size) =
|
|
|
|
(void *) BPF_FUNC_skb_get_tunnel_opt;
|
|
|
|
static int (*bpf_skb_set_tunnel_opt)(void *ctx, void *md, int size) =
|
|
|
|
(void *) BPF_FUNC_skb_set_tunnel_opt;
|
2016-09-01 18:37:25 -07:00
|
|
|
static unsigned long long (*bpf_get_prandom_u32)(void) =
|
|
|
|
(void *) BPF_FUNC_get_prandom_u32;
|
2016-12-07 15:53:14 -08:00
|
|
|
static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
|
|
|
|
(void *) BPF_FUNC_xdp_adjust_head;
|
2017-09-25 02:25:53 +02:00
|
|
|
static int (*bpf_xdp_adjust_meta)(void *ctx, int offset) =
|
|
|
|
(void *) BPF_FUNC_xdp_adjust_meta;
|
2018-07-30 17:42:30 -07:00
|
|
|
static int (*bpf_get_socket_cookie)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_get_socket_cookie;
|
2017-06-30 20:02:46 -07:00
|
|
|
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
|
|
|
|
int optlen) =
|
|
|
|
(void *) BPF_FUNC_setsockopt;
|
2017-10-20 11:05:40 -07:00
|
|
|
static int (*bpf_getsockopt)(void *ctx, int level, int optname, void *optval,
|
|
|
|
int optlen) =
|
|
|
|
(void *) BPF_FUNC_getsockopt;
|
2018-01-25 16:14:16 -08:00
|
|
|
static int (*bpf_sock_ops_cb_flags_set)(void *ctx, int flags) =
|
|
|
|
(void *) BPF_FUNC_sock_ops_cb_flags_set;
|
2017-10-18 07:10:36 -07:00
|
|
|
static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
|
2017-08-15 22:33:32 -07:00
|
|
|
(void *) BPF_FUNC_sk_redirect_map;
|
2018-05-14 10:00:18 -07:00
|
|
|
static int (*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) =
|
|
|
|
(void *) BPF_FUNC_sk_redirect_hash;
|
2017-08-15 22:33:32 -07:00
|
|
|
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
|
2017-08-28 07:10:04 -07:00
|
|
|
unsigned long long flags) =
|
2017-08-15 22:33:32 -07:00
|
|
|
(void *) BPF_FUNC_sock_map_update;
|
2018-05-14 10:00:18 -07:00
|
|
|
static int (*bpf_sock_hash_update)(void *map, void *key, void *value,
|
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_sock_hash_update;
|
2017-10-05 09:19:21 -07:00
|
|
|
static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags,
|
|
|
|
void *buf, unsigned int buf_size) =
|
|
|
|
(void *) BPF_FUNC_perf_event_read_value;
|
2017-10-05 09:19:23 -07:00
|
|
|
static int (*bpf_perf_prog_read_value)(void *ctx, void *buf,
|
|
|
|
unsigned int buf_size) =
|
|
|
|
(void *) BPF_FUNC_perf_prog_read_value;
|
2017-12-11 11:36:49 -05:00
|
|
|
static int (*bpf_override_return)(void *ctx, unsigned long rc) =
|
|
|
|
(void *) BPF_FUNC_override_return;
|
2018-03-18 12:57:41 -07:00
|
|
|
static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) =
|
|
|
|
(void *) BPF_FUNC_msg_redirect_map;
|
2018-05-14 10:00:18 -07:00
|
|
|
static int (*bpf_msg_redirect_hash)(void *ctx,
|
|
|
|
void *map, void *key, int flags) =
|
|
|
|
(void *) BPF_FUNC_msg_redirect_hash;
|
2018-03-18 12:57:56 -07:00
|
|
|
static int (*bpf_msg_apply_bytes)(void *ctx, int len) =
|
|
|
|
(void *) BPF_FUNC_msg_apply_bytes;
|
2018-03-18 12:58:02 -07:00
|
|
|
static int (*bpf_msg_cork_bytes)(void *ctx, int len) =
|
|
|
|
(void *) BPF_FUNC_msg_cork_bytes;
|
2018-03-18 12:58:12 -07:00
|
|
|
static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
|
|
|
|
(void *) BPF_FUNC_msg_pull_data;
|
2018-10-19 19:56:50 -07:00
|
|
|
static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
|
|
|
|
(void *) BPF_FUNC_msg_push_data;
|
2018-11-26 14:16:18 -08:00
|
|
|
static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
|
|
|
|
(void *) BPF_FUNC_msg_pop_data;
|
selftests/bpf: Selftest for sys_connect hooks
Add selftest for BPF_CGROUP_INET4_CONNECT and BPF_CGROUP_INET6_CONNECT
attach types.
Try to connect(2) to specified IP:port and test that:
* remote IP:port pair is overridden;
* local end of connection is bound to specified IP.
All combinations of IPv4/IPv6 and TCP/UDP are tested.
Example:
# tcpdump -pn -i lo -w connect.pcap 2>/dev/null &
[1] 478
# strace -qqf -e connect -o connect.trace ./test_sock_addr.sh
Wait for testing IPv4/IPv6 to become available ... OK
Load bind4 with invalid type (can pollute stderr) ... REJECTED
Load bind4 with valid type ... OK
Attach bind4 with invalid type ... REJECTED
Attach bind4 with valid type ... OK
Load connect4 with invalid type (can pollute stderr) libbpf: load bpf \
program failed: Permission denied
libbpf: -- BEGIN DUMP LOG ---
libbpf:
0: (b7) r2 = 23569
1: (63) *(u32 *)(r1 +24) = r2
2: (b7) r2 = 16777343
3: (63) *(u32 *)(r1 +4) = r2
invalid bpf_context access off=4 size=4
[ 1518.404609] random: crng init done
libbpf: -- END LOG --
libbpf: failed to load program 'cgroup/connect4'
libbpf: failed to load object './connect4_prog.o'
... REJECTED
Load connect4 with valid type ... OK
Attach connect4 with invalid type ... REJECTED
Attach connect4 with valid type ... OK
Test case #1 (IPv4/TCP):
Requested: bind(192.168.1.254, 4040) ..
Actual: bind(127.0.0.1, 4444)
Requested: connect(192.168.1.254, 4040) from (*, *) ..
Actual: connect(127.0.0.1, 4444) from (127.0.0.4, 56068)
Test case #2 (IPv4/UDP):
Requested: bind(192.168.1.254, 4040) ..
Actual: bind(127.0.0.1, 4444)
Requested: connect(192.168.1.254, 4040) from (*, *) ..
Actual: connect(127.0.0.1, 4444) from (127.0.0.4, 56447)
Load bind6 with invalid type (can pollute stderr) ... REJECTED
Load bind6 with valid type ... OK
Attach bind6 with invalid type ... REJECTED
Attach bind6 with valid type ... OK
Load connect6 with invalid type (can pollute stderr) libbpf: load bpf \
program failed: Permission denied
libbpf: -- BEGIN DUMP LOG ---
libbpf:
0: (b7) r6 = 0
1: (63) *(u32 *)(r1 +12) = r6
invalid bpf_context access off=12 size=4
libbpf: -- END LOG --
libbpf: failed to load program 'cgroup/connect6'
libbpf: failed to load object './connect6_prog.o'
... REJECTED
Load connect6 with valid type ... OK
Attach connect6 with invalid type ... REJECTED
Attach connect6 with valid type ... OK
Test case #3 (IPv6/TCP):
Requested: bind(face:b00c:1234:5678::abcd, 6060) ..
Actual: bind(::1, 6666)
Requested: connect(face:b00c:1234:5678::abcd, 6060) from (*, *)
Actual: connect(::1, 6666) from (::6, 37458)
Test case #4 (IPv6/UDP):
Requested: bind(face:b00c:1234:5678::abcd, 6060) ..
Actual: bind(::1, 6666)
Requested: connect(face:b00c:1234:5678::abcd, 6060) from (*, *)
Actual: connect(::1, 6666) from (::6, 39315)
### SUCCESS
# egrep 'connect\(.*AF_INET' connect.trace | \
> egrep -vw 'htons\(1025\)' | fold -b -s -w 72
502 connect(7, {sa_family=AF_INET, sin_port=htons(4040),
sin_addr=inet_addr("192.168.1.254")}, 128) = 0
502 connect(8, {sa_family=AF_INET, sin_port=htons(4040),
sin_addr=inet_addr("192.168.1.254")}, 128) = 0
502 connect(9, {sa_family=AF_INET6, sin6_port=htons(6060),
inet_pton(AF_INET6, "face:b00c:1234:5678::abcd", &sin6_addr),
sin6_flowinfo=0, sin6_scope_id=0}, 128) = 0
502 connect(10, {sa_family=AF_INET6, sin6_port=htons(6060),
inet_pton(AF_INET6, "face:b00c:1234:5678::abcd", &sin6_addr),
sin6_flowinfo=0, sin6_scope_id=0}, 128) = 0
# fg
tcpdump -pn -i lo -w connect.pcap 2> /dev/null
# tcpdump -r connect.pcap -n tcp | cut -c 1-72
reading from file connect.pcap, link-type EN10MB (Ethernet)
17:57:40.383533 IP 127.0.0.4.56068 > 127.0.0.1.4444: Flags [S], seq 1333
17:57:40.383566 IP 127.0.0.1.4444 > 127.0.0.4.56068: Flags [S.], seq 112
17:57:40.383589 IP 127.0.0.4.56068 > 127.0.0.1.4444: Flags [.], ack 1, w
17:57:40.384578 IP 127.0.0.1.4444 > 127.0.0.4.56068: Flags [R.], seq 1,
17:57:40.403327 IP6 ::6.37458 > ::1.6666: Flags [S], seq 406513443, win
17:57:40.403357 IP6 ::1.6666 > ::6.37458: Flags [S.], seq 2448389240, ac
17:57:40.403376 IP6 ::6.37458 > ::1.6666: Flags [.], ack 1, win 342, opt
17:57:40.404263 IP6 ::1.6666 > ::6.37458: Flags [R.], seq 1, ack 1, win
Signed-off-by: Andrey Ignatov <rdna@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-03-30 15:08:06 -07:00
|
|
|
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
|
|
|
|
(void *) BPF_FUNC_bind;
|
2018-04-17 21:42:22 -07:00
|
|
|
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
|
|
|
|
(void *) BPF_FUNC_xdp_adjust_tail;
|
2018-04-24 17:50:30 +03:00
|
|
|
static int (*bpf_skb_get_xfrm_state)(void *ctx, int index, void *state,
|
|
|
|
int size, int flags) =
|
|
|
|
(void *) BPF_FUNC_skb_get_xfrm_state;
|
2018-08-08 01:01:31 -07:00
|
|
|
static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) =
|
|
|
|
(void *) BPF_FUNC_sk_select_reuseport;
|
2018-04-28 22:28:12 -07:00
|
|
|
static int (*bpf_get_stack)(void *ctx, void *buf, int size, int flags) =
|
|
|
|
(void *) BPF_FUNC_get_stack;
|
2018-05-09 20:34:27 -07:00
|
|
|
static int (*bpf_fib_lookup)(void *ctx, struct bpf_fib_lookup *params,
|
|
|
|
int plen, __u32 flags) =
|
|
|
|
(void *) BPF_FUNC_fib_lookup;
|
2018-05-20 14:58:17 +01:00
|
|
|
static int (*bpf_lwt_push_encap)(void *ctx, unsigned int type, void *hdr,
|
|
|
|
unsigned int len) =
|
|
|
|
(void *) BPF_FUNC_lwt_push_encap;
|
|
|
|
static int (*bpf_lwt_seg6_store_bytes)(void *ctx, unsigned int offset,
|
|
|
|
void *from, unsigned int len) =
|
|
|
|
(void *) BPF_FUNC_lwt_seg6_store_bytes;
|
|
|
|
static int (*bpf_lwt_seg6_action)(void *ctx, unsigned int action, void *param,
|
|
|
|
unsigned int param_len) =
|
|
|
|
(void *) BPF_FUNC_lwt_seg6_action;
|
|
|
|
static int (*bpf_lwt_seg6_adjust_srh)(void *ctx, unsigned int offset,
|
|
|
|
unsigned int len) =
|
|
|
|
(void *) BPF_FUNC_lwt_seg6_adjust_srh;
|
2018-05-27 12:24:10 +01:00
|
|
|
static int (*bpf_rc_repeat)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_rc_repeat;
|
|
|
|
static int (*bpf_rc_keydown)(void *ctx, unsigned int protocol,
|
|
|
|
unsigned long long scancode, unsigned int toggle) =
|
|
|
|
(void *) BPF_FUNC_rc_keydown;
|
2018-06-03 15:59:42 -07:00
|
|
|
static unsigned long long (*bpf_get_current_cgroup_id)(void) =
|
|
|
|
(void *) BPF_FUNC_get_current_cgroup_id;
|
2018-08-02 14:27:28 -07:00
|
|
|
static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_get_local_storage;
|
2018-08-12 10:49:29 -07:00
|
|
|
static unsigned long long (*bpf_skb_cgroup_id)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_skb_cgroup_id;
|
|
|
|
static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
|
|
|
|
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
|
bpf: Add helper to retrieve socket in BPF
This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and
bpf_sk_lookup_udp() which allows BPF programs to find out if there is a
socket listening on this host, and returns a socket pointer which the
BPF program can then access to determine, for instance, whether to
forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on the
socket, so when a BPF program makes use of this function, it must
subsequently pass the returned pointer into the newly added sk_release()
to return the reference.
By way of example, the following pseudocode would filter inbound
connections at XDP if there is no corresponding service listening for
the traffic:
struct bpf_sock_tuple tuple;
struct bpf_sock_ops *sk;
populate_tuple(ctx, &tuple); // Extract the 5tuple from the packet
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0);
if (!sk) {
// Couldn't find a socket listening for this traffic. Drop.
return TC_ACT_SHOT;
}
bpf_sk_release(sk, 0);
return TC_ACT_OK;
Signed-off-by: Joe Stringer <joe@wand.net.nz>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-10-02 13:35:36 -07:00
|
|
|
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
|
|
|
|
struct bpf_sock_tuple *tuple,
|
2018-11-30 15:32:20 -08:00
|
|
|
int size, unsigned long long netns_id,
|
bpf: Add helper to retrieve socket in BPF
This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and
bpf_sk_lookup_udp() which allows BPF programs to find out if there is a
socket listening on this host, and returns a socket pointer which the
BPF program can then access to determine, for instance, whether to
forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on the
socket, so when a BPF program makes use of this function, it must
subsequently pass the returned pointer into the newly added sk_release()
to return the reference.
By way of example, the following pseudocode would filter inbound
connections at XDP if there is no corresponding service listening for
the traffic:
struct bpf_sock_tuple tuple;
struct bpf_sock_ops *sk;
populate_tuple(ctx, &tuple); // Extract the 5tuple from the packet
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0);
if (!sk) {
// Couldn't find a socket listening for this traffic. Drop.
return TC_ACT_SHOT;
}
bpf_sk_release(sk, 0);
return TC_ACT_OK;
Signed-off-by: Joe Stringer <joe@wand.net.nz>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-10-02 13:35:36 -07:00
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_sk_lookup_tcp;
|
2019-03-22 09:54:06 +08:00
|
|
|
static struct bpf_sock *(*bpf_skc_lookup_tcp)(void *ctx,
|
|
|
|
struct bpf_sock_tuple *tuple,
|
|
|
|
int size, unsigned long long netns_id,
|
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_skc_lookup_tcp;
|
bpf: Add helper to retrieve socket in BPF
This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and
bpf_sk_lookup_udp() which allows BPF programs to find out if there is a
socket listening on this host, and returns a socket pointer which the
BPF program can then access to determine, for instance, whether to
forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on the
socket, so when a BPF program makes use of this function, it must
subsequently pass the returned pointer into the newly added sk_release()
to return the reference.
By way of example, the following pseudocode would filter inbound
connections at XDP if there is no corresponding service listening for
the traffic:
struct bpf_sock_tuple tuple;
struct bpf_sock_ops *sk;
populate_tuple(ctx, &tuple); // Extract the 5tuple from the packet
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0);
if (!sk) {
// Couldn't find a socket listening for this traffic. Drop.
return TC_ACT_SHOT;
}
bpf_sk_release(sk, 0);
return TC_ACT_OK;
Signed-off-by: Joe Stringer <joe@wand.net.nz>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-10-02 13:35:36 -07:00
|
|
|
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
|
|
|
|
struct bpf_sock_tuple *tuple,
|
2018-11-30 15:32:20 -08:00
|
|
|
int size, unsigned long long netns_id,
|
bpf: Add helper to retrieve socket in BPF
This patch adds new BPF helper functions, bpf_sk_lookup_tcp() and
bpf_sk_lookup_udp() which allows BPF programs to find out if there is a
socket listening on this host, and returns a socket pointer which the
BPF program can then access to determine, for instance, whether to
forward or drop traffic. bpf_sk_lookup_xxx() may take a reference on the
socket, so when a BPF program makes use of this function, it must
subsequently pass the returned pointer into the newly added sk_release()
to return the reference.
By way of example, the following pseudocode would filter inbound
connections at XDP if there is no corresponding service listening for
the traffic:
struct bpf_sock_tuple tuple;
struct bpf_sock_ops *sk;
populate_tuple(ctx, &tuple); // Extract the 5tuple from the packet
sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof tuple, netns, 0);
if (!sk) {
// Couldn't find a socket listening for this traffic. Drop.
return TC_ACT_SHOT;
}
bpf_sk_release(sk, 0);
return TC_ACT_OK;
Signed-off-by: Joe Stringer <joe@wand.net.nz>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-10-02 13:35:36 -07:00
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_sk_lookup_udp;
|
|
|
|
static int (*bpf_sk_release)(struct bpf_sock *sk) =
|
|
|
|
(void *) BPF_FUNC_sk_release;
|
2018-10-09 12:04:48 +02:00
|
|
|
static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
|
|
|
|
(void *) BPF_FUNC_skb_vlan_push;
|
|
|
|
static int (*bpf_skb_vlan_pop)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_skb_vlan_pop;
|
2018-12-06 13:01:03 +00:00
|
|
|
static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
|
|
|
|
(void *) BPF_FUNC_rc_pointer_rel;
|
2019-01-31 15:40:08 -08:00
|
|
|
static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
|
|
|
|
(void *) BPF_FUNC_spin_lock;
|
|
|
|
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
|
|
|
|
(void *) BPF_FUNC_spin_unlock;
|
2019-02-09 23:22:28 -08:00
|
|
|
static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
|
|
|
|
(void *) BPF_FUNC_sk_fullsock;
|
|
|
|
static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
|
|
|
|
(void *) BPF_FUNC_tcp_sock;
|
2019-03-12 10:23:11 -07:00
|
|
|
static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
|
|
|
|
(void *) BPF_FUNC_get_listener_sock;
|
2019-03-01 12:38:47 -08:00
|
|
|
static int (*bpf_skb_ecn_set_ce)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_skb_ecn_set_ce;
|
2019-03-22 09:54:06 +08:00
|
|
|
static int (*bpf_tcp_check_syncookie)(struct bpf_sock *sk,
|
|
|
|
void *ip, int ip_len, void *tcp, int tcp_len) =
|
|
|
|
(void *) BPF_FUNC_tcp_check_syncookie;
|
2019-03-23 15:47:05 -07:00
|
|
|
static int (*bpf_sysctl_get_name)(void *ctx, char *buf,
|
|
|
|
unsigned long long buf_len,
|
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_sysctl_get_name;
|
|
|
|
static int (*bpf_sysctl_get_current_value)(void *ctx, char *buf,
|
|
|
|
unsigned long long buf_len) =
|
|
|
|
(void *) BPF_FUNC_sysctl_get_current_value;
|
|
|
|
static int (*bpf_sysctl_get_new_value)(void *ctx, char *buf,
|
|
|
|
unsigned long long buf_len) =
|
|
|
|
(void *) BPF_FUNC_sysctl_get_new_value;
|
|
|
|
static int (*bpf_sysctl_set_new_value)(void *ctx, const char *buf,
|
|
|
|
unsigned long long buf_len) =
|
|
|
|
(void *) BPF_FUNC_sysctl_set_new_value;
|
|
|
|
static int (*bpf_strtol)(const char *buf, unsigned long long buf_len,
|
|
|
|
unsigned long long flags, long *res) =
|
|
|
|
(void *) BPF_FUNC_strtol;
|
|
|
|
static int (*bpf_strtoul)(const char *buf, unsigned long long buf_len,
|
|
|
|
unsigned long long flags, unsigned long *res) =
|
|
|
|
(void *) BPF_FUNC_strtoul;
|
2019-04-26 16:39:54 -07:00
|
|
|
static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk,
|
|
|
|
void *value, __u64 flags) =
|
|
|
|
(void *) BPF_FUNC_sk_storage_get;
|
|
|
|
static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) =
|
|
|
|
(void *)BPF_FUNC_sk_storage_delete;
|
2019-05-23 14:47:47 -07:00
|
|
|
static int (*bpf_send_signal)(unsigned sig) = (void *)BPF_FUNC_send_signal;
|
2019-07-29 09:59:17 -07:00
|
|
|
static long long (*bpf_tcp_gen_syncookie)(struct bpf_sock *sk, void *ip,
|
|
|
|
int ip_len, void *tcp, int tcp_len) =
|
|
|
|
(void *) BPF_FUNC_tcp_gen_syncookie;
|
2014-12-01 15:06:37 -08:00
|
|
|
|
|
|
|
/* llvm builtin functions that eBPF C program may use to
|
|
|
|
* emit BPF_LD_ABS and BPF_LD_IND instructions
|
|
|
|
*/
|
|
|
|
struct sk_buff;
|
|
|
|
unsigned long long load_byte(void *skb,
|
|
|
|
unsigned long long off) asm("llvm.bpf.load.byte");
|
|
|
|
unsigned long long load_half(void *skb,
|
|
|
|
unsigned long long off) asm("llvm.bpf.load.half");
|
|
|
|
unsigned long long load_word(void *skb,
|
|
|
|
unsigned long long off) asm("llvm.bpf.load.word");
|
|
|
|
|
|
|
|
/* a helper structure used by eBPF C program
|
|
|
|
* to describe map attributes to elf_bpf loader
|
|
|
|
*/
|
|
|
|
struct bpf_map_def {
|
|
|
|
unsigned int type;
|
|
|
|
unsigned int key_size;
|
|
|
|
unsigned int value_size;
|
|
|
|
unsigned int max_entries;
|
2016-03-07 21:57:20 -08:00
|
|
|
unsigned int map_flags;
|
2017-03-22 10:00:35 -07:00
|
|
|
unsigned int inner_map_idx;
|
2017-08-18 11:28:01 -07:00
|
|
|
unsigned int numa_node;
|
2014-12-01 15:06:37 -08:00
|
|
|
};
|
|
|
|
|
selftests/bpf: add bpf-gcc support
Now that binutils and gcc support for BPF is upstream, make use of it in
BPF selftests using alu32-like approach. Share as much as possible of
CFLAGS calculation with clang.
Fixes only obvious issues, leaving more complex ones for later:
- Use gcc-provided bpf-helpers.h instead of manually defining the
helpers, change bpf_helpers.h include guard to avoid conflict.
- Include <linux/stddef.h> for __always_inline.
- Add $(OUTPUT)/../usr/include to include path in order to use local
kernel headers instead of system kernel headers when building with O=.
In order to activate the bpf-gcc support, one needs to configure
binutils and gcc with --target=bpf and make them available in $PATH. In
particular, gcc must be installed as `bpf-gcc`, which is the default.
Right now with binutils 25a2915e8dba and gcc r275589 only a handful of
tests work:
# ./test_progs_bpf_gcc
# Summary: 7/39 PASSED, 1 SKIPPED, 98 FAILED
The reasons for those failures are as follows:
- Build errors:
- `error: too many function arguments for eBPF` for __always_inline
functions read_str_var and read_map_var - must be inlining issue,
and for process_l3_headers_v6, which relies on optimizing away
function arguments.
- `error: indirect call in function, which are not supported by eBPF`
where there are no obvious indirect calls in the source code, e.g.
in __encap_ipip_none.
- `error: field 'lock' has incomplete type` for fields of `struct
bpf_spin_lock` type - bpf_spin_lock is redefined by bpf-helpers.h,
so its usage is sensitive to order of #includes.
- `error: eBPF stack limit exceeded` in sysctl_tcp_mem.
- Load errors:
- Missing object files due to above build errors.
- `libbpf: failed to create map (name: 'test_ver.bss')`.
- `libbpf: object file doesn't contain bpf program`.
- `libbpf: Program '.text' contains unrecognized relo data pointing to
section 0`.
- `libbpf: BTF is required, but is missing or corrupted` - no BTF
support in gcc yet.
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Cc: Jose E. Marchesi <jose.marchesi@oracle.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2019-09-12 18:05:43 +02:00
|
|
|
#else
|
|
|
|
|
|
|
|
#include <bpf-helpers.h>
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2018-07-24 08:40:22 -07:00
|
|
|
#define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \
|
|
|
|
struct ____btf_map_##name { \
|
|
|
|
type_key key; \
|
|
|
|
type_val value; \
|
|
|
|
}; \
|
|
|
|
struct ____btf_map_##name \
|
|
|
|
__attribute__ ((section(".maps." #name), used)) \
|
|
|
|
____btf_map_##name = { }
|
|
|
|
|
2016-11-30 17:10:11 +01:00
|
|
|
static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) =
|
|
|
|
(void *) BPF_FUNC_skb_load_bytes;
|
2018-08-08 01:01:31 -07:00
|
|
|
static int (*bpf_skb_load_bytes_relative)(void *ctx, int off, void *to, int len, __u32 start_header) =
|
|
|
|
(void *) BPF_FUNC_skb_load_bytes_relative;
|
2015-04-01 17:12:13 -07:00
|
|
|
static int (*bpf_skb_store_bytes)(void *ctx, int off, void *from, int len, int flags) =
|
|
|
|
(void *) BPF_FUNC_skb_store_bytes;
|
|
|
|
static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flags) =
|
|
|
|
(void *) BPF_FUNC_l3_csum_replace;
|
|
|
|
static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
|
|
|
|
(void *) BPF_FUNC_l4_csum_replace;
|
2018-04-17 21:42:23 -07:00
|
|
|
static int (*bpf_csum_diff)(void *from, int from_size, void *to, int to_size, int seed) =
|
|
|
|
(void *) BPF_FUNC_csum_diff;
|
2016-08-12 22:17:17 +02:00
|
|
|
static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) =
|
|
|
|
(void *) BPF_FUNC_skb_under_cgroup;
|
2016-11-30 17:10:11 +01:00
|
|
|
static int (*bpf_skb_change_head)(void *, int len, int flags) =
|
|
|
|
(void *) BPF_FUNC_skb_change_head;
|
2018-03-18 12:57:31 -07:00
|
|
|
static int (*bpf_skb_pull_data)(void *, int len) =
|
|
|
|
(void *) BPF_FUNC_skb_pull_data;
|
2019-02-27 11:08:06 -05:00
|
|
|
static unsigned int (*bpf_get_cgroup_classid)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_get_cgroup_classid;
|
|
|
|
static unsigned int (*bpf_get_route_realm)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_get_route_realm;
|
|
|
|
static int (*bpf_skb_change_proto)(void *ctx, __be16 proto, __u64 flags) =
|
|
|
|
(void *) BPF_FUNC_skb_change_proto;
|
|
|
|
static int (*bpf_skb_change_type)(void *ctx, __u32 type) =
|
|
|
|
(void *) BPF_FUNC_skb_change_type;
|
|
|
|
static unsigned int (*bpf_get_hash_recalc)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_get_hash_recalc;
|
2019-05-16 21:34:11 -07:00
|
|
|
static unsigned long long (*bpf_get_current_task)(void) =
|
2019-02-27 11:08:06 -05:00
|
|
|
(void *) BPF_FUNC_get_current_task;
|
|
|
|
static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
|
|
|
|
(void *) BPF_FUNC_skb_change_tail;
|
|
|
|
static long long (*bpf_csum_update)(void *ctx, __u32 csum) =
|
|
|
|
(void *) BPF_FUNC_csum_update;
|
|
|
|
static void (*bpf_set_hash_invalid)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_set_hash_invalid;
|
|
|
|
static int (*bpf_get_numa_node_id)(void) =
|
|
|
|
(void *) BPF_FUNC_get_numa_node_id;
|
|
|
|
static int (*bpf_probe_read_str)(void *ctx, __u32 size,
|
|
|
|
const void *unsafe_ptr) =
|
|
|
|
(void *) BPF_FUNC_probe_read_str;
|
|
|
|
static unsigned int (*bpf_get_socket_uid)(void *ctx) =
|
|
|
|
(void *) BPF_FUNC_get_socket_uid;
|
|
|
|
static unsigned int (*bpf_set_hash)(void *ctx, __u32 hash) =
|
|
|
|
(void *) BPF_FUNC_set_hash;
|
|
|
|
static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
|
|
|
|
unsigned long long flags) =
|
|
|
|
(void *) BPF_FUNC_skb_adjust_room;
|
2015-04-01 17:12:13 -07:00
|
|
|
|
2017-09-20 09:11:58 -07:00
|
|
|
/*
 * Scan the ARCH passed in from the ARCH env variable (see Makefile).
 * The build system defines exactly one __TARGET_ARCH_* macro; map it to
 * the corresponding internal bpf_target_* macro.  bpf_target_defined is
 * set iff one of the known architectures matched, so that the fallback
 * detection below can take over otherwise.
 */
#if defined(__TARGET_ARCH_x86)
	#define bpf_target_x86
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
	#define bpf_target_s390
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
	#define bpf_target_arm
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
	#define bpf_target_arm64
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
	#define bpf_target_mips
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
	#define bpf_target_powerpc
	#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
	#define bpf_target_sparc
	#define bpf_target_defined
#else
	#undef bpf_target_defined
#endif
|
|
|
|
|
|
|
|
/*
 * Fall back to what the compiler says when no __TARGET_ARCH_* macro was
 * supplied: derive the target from the compiler's own predefined
 * architecture macros.  Note this branch intentionally leaves
 * bpf_target_defined unset; an unrecognized architecture simply gets no
 * PT_REGS_* macros below.
 */
#ifndef bpf_target_defined
#if defined(__x86_64__)
	#define bpf_target_x86
#elif defined(__s390__)
	#define bpf_target_s390
#elif defined(__arm__)
	#define bpf_target_arm
#elif defined(__aarch64__)
	#define bpf_target_arm64
#elif defined(__mips__)
	#define bpf_target_mips
#elif defined(__powerpc__)
	#define bpf_target_powerpc
#elif defined(__sparc__)
	#define bpf_target_sparc
#endif
#endif
|
|
|
|
|
|
|
|
/*
 * Per-architecture PT_REGS_* accessors: map function-call parameters,
 * return value, stack/frame pointers and instruction pointer onto the
 * architecture's pt_regs layout.  Which set is selected depends on the
 * bpf_target_* macro chosen above.
 */
#if defined(bpf_target_x86)

#ifdef __KERNEL__
/* Kernel struct pt_regs uses short member names (di, si, ...). */
#define PT_REGS_PARM1(x) ((x)->di)
#define PT_REGS_PARM2(x) ((x)->si)
#define PT_REGS_PARM3(x) ((x)->dx)
#define PT_REGS_PARM4(x) ((x)->cx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->sp)
#define PT_REGS_FP(x) ((x)->bp)
#define PT_REGS_RC(x) ((x)->ax)
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->ip)
#else
/* Userspace struct pt_regs uses the long register names. */
#ifdef __i386__
/* i386 kernel is built with -mregparm=3 */
#define PT_REGS_PARM1(x) ((x)->eax)
#define PT_REGS_PARM2(x) ((x)->edx)
#define PT_REGS_PARM3(x) ((x)->ecx)
#define PT_REGS_PARM4(x) 0
#define PT_REGS_PARM5(x) 0
#define PT_REGS_RET(x) ((x)->esp)
#define PT_REGS_FP(x) ((x)->ebp)
#define PT_REGS_RC(x) ((x)->eax)
#define PT_REGS_SP(x) ((x)->esp)
#define PT_REGS_IP(x) ((x)->eip)
#else
#define PT_REGS_PARM1(x) ((x)->rdi)
#define PT_REGS_PARM2(x) ((x)->rsi)
#define PT_REGS_PARM3(x) ((x)->rdx)
#define PT_REGS_PARM4(x) ((x)->rcx)
#define PT_REGS_PARM5(x) ((x)->r8)
#define PT_REGS_RET(x) ((x)->rsp)
#define PT_REGS_FP(x) ((x)->rbp)
#define PT_REGS_RC(x) ((x)->rax)
#define PT_REGS_SP(x) ((x)->rsp)
#define PT_REGS_IP(x) ((x)->rip)
#endif
#endif

#elif defined(bpf_target_s390)

/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_S390 const volatile user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)

#elif defined(bpf_target_arm)

#define PT_REGS_PARM1(x) ((x)->uregs[0])
#define PT_REGS_PARM2(x) ((x)->uregs[1])
#define PT_REGS_PARM3(x) ((x)->uregs[2])
#define PT_REGS_PARM4(x) ((x)->uregs[3])
#define PT_REGS_PARM5(x) ((x)->uregs[4])
#define PT_REGS_RET(x) ((x)->uregs[14])
#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->uregs[0])
#define PT_REGS_SP(x) ((x)->uregs[13])
#define PT_REGS_IP(x) ((x)->uregs[12])

#elif defined(bpf_target_arm64)

/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
/* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)

#elif defined(bpf_target_mips)

#define PT_REGS_PARM1(x) ((x)->regs[4])
#define PT_REGS_PARM2(x) ((x)->regs[5])
#define PT_REGS_PARM3(x) ((x)->regs[6])
#define PT_REGS_PARM4(x) ((x)->regs[7])
#define PT_REGS_PARM5(x) ((x)->regs[8])
#define PT_REGS_RET(x) ((x)->regs[31])
#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
#define PT_REGS_RC(x) ((x)->regs[1])
#define PT_REGS_SP(x) ((x)->regs[29])
#define PT_REGS_IP(x) ((x)->cp0_epc)

#elif defined(bpf_target_powerpc)

#define PT_REGS_PARM1(x) ((x)->gpr[3])
#define PT_REGS_PARM2(x) ((x)->gpr[4])
#define PT_REGS_PARM3(x) ((x)->gpr[5])
#define PT_REGS_PARM4(x) ((x)->gpr[6])
#define PT_REGS_PARM5(x) ((x)->gpr[7])
#define PT_REGS_RC(x) ((x)->gpr[3])
#define PT_REGS_SP(x) ((x)->sp)
#define PT_REGS_IP(x) ((x)->nip)

#elif defined(bpf_target_sparc)

#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])

/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
#define PT_REGS_IP(x) ((x)->tpc)
#else
#define PT_REGS_IP(x) ((x)->pc)
#endif

#endif
|
2016-04-04 22:31:34 +05:30
|
|
|
|
2019-07-10 13:56:54 +02:00
|
|
|
/*
 * BPF_KPROBE_READ_RET_IP / BPF_KRETPROBE_READ_RET_IP: fetch the return
 * address of the probed function into `ip`.  On powerpc and sparc it is
 * available directly in a register; elsewhere it must be read from the
 * stack via bpf_probe_read() (kretprobe reads it relative to the frame
 * pointer, which requires CONFIG_FRAME_POINTER).
 */
#if defined(bpf_target_powerpc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
#elif defined(bpf_target_sparc)
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP		BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx)		({				\
		bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx)	({				\
		bpf_probe_read(&(ip), sizeof(ip),				\
			       (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
|
|
|
|
|
2019-08-07 14:39:52 -07:00
|
|
|
/*
 * BPF_CORE_READ(dst, src) reads *src into dst via bpf_probe_read(),
 * wrapping the source address in __builtin_preserve_access_index() (a
 * Clang built-in).
 *
 * The built-in takes an expression that computes the address of a field
 * inside a struct/union and makes the compiler emit a relocation
 * recording the BTF type ID of the root struct/union plus an accessor
 * string identifying the exact embedded field.  (The relocation format
 * and its semantics are described in the comments on struct
 * bpf_offset_reloc in libbpf_internal.h.)
 *
 * At load time libbpf uses that relocation to patch the instruction
 * with the correct field offset for the *target* kernel's BTF, matching
 * it against the local BTF the relocation was recorded with — i.e.
 * CO-RE field-offset relocation.
 */
#define BPF_CORE_READ(dst, src)						\
	bpf_probe_read((dst), sizeof(*(src)),				\
		       __builtin_preserve_access_index(src))
|
|
|
|
|
2014-12-01 15:06:37 -08:00
|
|
|
#endif
|