summaryrefslogtreecommitdiff
path: root/bpf_progs/netd.c
diff options
context:
space:
mode:
Diffstat (limited to 'bpf_progs/netd.c')
-rw-r--r--bpf_progs/netd.c25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/bpf_progs/netd.c b/bpf_progs/netd.c
index f3470280..e9e1477f 100644
--- a/bpf_progs/netd.c
+++ b/bpf_progs/netd.c
@@ -203,9 +203,12 @@ static inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, int direc
if ((enabledRules & POWERSAVE_MATCH) && !(uidRules & POWERSAVE_MATCH)) {
return BPF_DROP;
}
+ if ((enabledRules & RESTRICTED_MATCH) && !(uidRules & RESTRICTED_MATCH)) {
+ return BPF_DROP;
+ }
}
if (direction == BPF_INGRESS && (uidRules & IIF_MATCH)) {
- // Drops packets not coming from lo nor the whitelisted interface
+ // Drops packets not coming from lo nor the allowlisted interface
if (allowed_iif && skb->ifindex != 1 && skb->ifindex != allowed_iif) {
return BPF_DROP_UNLESS_DNS;
}
@@ -280,13 +283,13 @@ static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, int
return match;
}
-SEC("cgroupskb/ingress/stats")
-int bpf_cgroup_ingress(struct __sk_buff* skb) {
+DEFINE_BPF_PROG("cgroupskb/ingress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_ingress)
+(struct __sk_buff* skb) {
return bpf_traffic_account(skb, BPF_INGRESS);
}
-SEC("cgroupskb/egress/stats")
-int bpf_cgroup_egress(struct __sk_buff* skb) {
+DEFINE_BPF_PROG("cgroupskb/egress/stats", AID_ROOT, AID_ROOT, bpf_cgroup_egress)
+(struct __sk_buff* skb) {
return bpf_traffic_account(skb, BPF_EGRESS);
}
@@ -315,7 +318,7 @@ DEFINE_BPF_PROG("skfilter/ingress/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_ingres
return BPF_MATCH;
}
-DEFINE_BPF_PROG("skfilter/whitelist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_whitelist_prog)
+DEFINE_BPF_PROG("skfilter/allowlist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_allowlist_prog)
(struct __sk_buff* skb) {
uint32_t sock_uid = bpf_get_socket_uid(skb);
if (is_system_uid(sock_uid)) return BPF_MATCH;
@@ -327,16 +330,16 @@ DEFINE_BPF_PROG("skfilter/whitelist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_whit
if ((sock_uid == 65534) && !bpf_get_socket_cookie(skb) && is_received_skb(skb))
return BPF_MATCH;
- UidOwnerValue* whitelistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
- if (whitelistMatch) return whitelistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
+ UidOwnerValue* allowlistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
+ if (allowlistMatch) return allowlistMatch->rule & HAPPY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
return BPF_NOMATCH;
}
-DEFINE_BPF_PROG("skfilter/blacklist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_blacklist_prog)
+DEFINE_BPF_PROG("skfilter/denylist/xtbpf", AID_ROOT, AID_NET_ADMIN, xt_bpf_denylist_prog)
(struct __sk_buff* skb) {
uint32_t sock_uid = bpf_get_socket_uid(skb);
- UidOwnerValue* blacklistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
- if (blacklistMatch) return blacklistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
+ UidOwnerValue* denylistMatch = bpf_uid_owner_map_lookup_elem(&sock_uid);
+ if (denylistMatch) return denylistMatch->rule & PENALTY_BOX_MATCH ? BPF_MATCH : BPF_NOMATCH;
return BPF_NOMATCH;
}