diff options
Diffstat (limited to 'common/native/bpf_headers/include/bpf/bpf_helpers.h')
-rw-r--r-- | common/native/bpf_headers/include/bpf/bpf_helpers.h | 60 |
1 file changed, 42 insertions, 18 deletions
diff --git a/common/native/bpf_headers/include/bpf/bpf_helpers.h b/common/native/bpf_headers/include/bpf/bpf_helpers.h index ac9f9bcb..10686a29 100644 --- a/common/native/bpf_headers/include/bpf/bpf_helpers.h +++ b/common/native/bpf_headers/include/bpf/bpf_helpers.h @@ -19,6 +19,17 @@ * * ******************************************************************************/ +// The actual versions of the bpfloader that shipped in various Android releases + +// Android P/Q/R: BpfLoader was initially part of netd, +// this was later split out into a standalone binary, but was unversioned. + +// Android S / 12 (api level 31) - added 'tethering' mainline eBPF support +#define BPFLOADER_S_VERSION 2u + +// Android T / 13 Beta 3 (api level 33) - added support for 'netd_shared' +#define BPFLOADER_T_BETA3_VERSION 13u + /* For mainline module use, you can #define BPFLOADER_{MIN/MAX}_VER * before #include "bpf_helpers.h" to change which bpfloaders will * process the resulting .o file. @@ -126,11 +137,12 @@ static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map, ____btf_map_##name = { } /* type safe macro to declare a map and related accessor functions */ -#define DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, usr, grp, md) \ +#define DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, \ + selinux, pindir, share) \ const struct bpf_map_def SECTION("maps") the_map = { \ .type = BPF_MAP_TYPE_##TYPE, \ - .key_size = sizeof(TypeOfKey), \ - .value_size = sizeof(TypeOfValue), \ + .key_size = sizeof(KeyType), \ + .value_size = sizeof(ValueType), \ .max_entries = (num_entries), \ .map_flags = 0, \ .uid = (usr), \ @@ -140,34 +152,40 @@ static int (*bpf_map_delete_elem_unsafe)(const struct bpf_map_def* map, .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER, \ .min_kver = KVER_NONE, \ .max_kver = KVER_INF, \ + .selinux_context = selinux, \ + .pin_subdir = pindir, \ + .shared = share, \ }; \ - BPF_ANNOTATE_KV_PAIR(the_map, 
TypeOfKey, TypeOfValue); \ + BPF_ANNOTATE_KV_PAIR(the_map, KeyType, ValueType); \ \ - static inline __always_inline __unused TypeOfValue* bpf_##the_map##_lookup_elem( \ - const TypeOfKey* k) { \ + static inline __always_inline __unused ValueType* bpf_##the_map##_lookup_elem( \ + const KeyType* k) { \ return bpf_map_lookup_elem_unsafe(&the_map, k); \ }; \ \ static inline __always_inline __unused int bpf_##the_map##_update_elem( \ - const TypeOfKey* k, const TypeOfValue* v, unsigned long long flags) { \ + const KeyType* k, const ValueType* v, unsigned long long flags) { \ return bpf_map_update_elem_unsafe(&the_map, k, v, flags); \ }; \ \ - static inline __always_inline __unused int bpf_##the_map##_delete_elem(const TypeOfKey* k) { \ + static inline __always_inline __unused int bpf_##the_map##_delete_elem(const KeyType* k) { \ return bpf_map_delete_elem_unsafe(&the_map, k); \ }; -#define DEFINE_BPF_MAP(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries) \ - DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, AID_ROOT, 0600) +#define DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md) \ + DEFINE_BPF_MAP_EXT(the_map, TYPE, KeyType, ValueType, num_entries, usr, grp, md, "", "", false) -#define DEFINE_BPF_MAP_GWO(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \ - DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0620) +#define DEFINE_BPF_MAP(the_map, TYPE, KeyType, ValueType, num_entries) \ + DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, AID_ROOT, 0600) -#define DEFINE_BPF_MAP_GRO(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \ - DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0640) +#define DEFINE_BPF_MAP_GWO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ + DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0620) -#define DEFINE_BPF_MAP_GRW(the_map, 
TYPE, TypeOfKey, TypeOfValue, num_entries, gid) \ - DEFINE_BPF_MAP_UGM(the_map, TYPE, TypeOfKey, TypeOfValue, num_entries, AID_ROOT, gid, 0660) +#define DEFINE_BPF_MAP_GRO(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ + DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0640) + +#define DEFINE_BPF_MAP_GRW(the_map, TYPE, KeyType, ValueType, num_entries, gid) \ + DEFINE_BPF_MAP_UGM(the_map, TYPE, KeyType, ValueType, num_entries, AID_ROOT, gid, 0660) static int (*bpf_probe_read)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read; static int (*bpf_probe_read_str)(void* dst, int size, void* unsafe_ptr) = (void*) BPF_FUNC_probe_read_str; @@ -178,8 +196,8 @@ static unsigned long long (*bpf_get_current_pid_tgid)(void) = (void*) BPF_FUNC_g static unsigned long long (*bpf_get_current_uid_gid)(void) = (void*) BPF_FUNC_get_current_uid_gid; static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_get_smp_processor_id; -#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \ - opt) \ +#define DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt, \ + selinux, pindir) \ const struct bpf_prog_def SECTION("progs") the_prog##_def = { \ .uid = (prog_uid), \ .gid = (prog_gid), \ @@ -188,10 +206,16 @@ static unsigned long long (*bpf_get_smp_processor_id)(void) = (void*) BPF_FUNC_g .optional = (opt), \ .bpfloader_min_ver = DEFAULT_BPFLOADER_MIN_VER, \ .bpfloader_max_ver = DEFAULT_BPFLOADER_MAX_VER, \ + .selinux_context = selinux, \ + .pin_subdir = pindir, \ }; \ SECTION(SECTION_NAME) \ int the_prog +#define DEFINE_BPF_PROG_KVER_RANGE_OPT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, \ + opt) \ + DEFINE_BPF_PROG_EXT(SECTION_NAME, prog_uid, prog_gid, the_prog, min_kv, max_kv, opt, "", "") + // Programs (here used in the sense of functions/sections) marked optional are allowed to fail // to load (for example due to missing 
kernel patches). // The bpfloader will just ignore these failures and continue processing the next section. |