aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAdityaK <appujee@google.com>2024-01-31 14:53:37 -0800
committerAdityaK <appujee@google.com>2024-01-31 16:11:31 -0800
commit145549b639b7df1b3fee330aaa3bd2e57d234c47 (patch)
treea7add0a03af3282bab879f6f3f7fb736e5df380c
parentbc8de6f6af537ef5d4947bfaa672d5798747aa69 (diff)
downloadbcc-145549b639b7df1b3fee330aaa3bd2e57d234c47.tar.gz
Convert signed div to unsigned as it is not supported in bpf
Signed division is not supported by the BPF backend, so convert the signed delta to unsigned before dividing. An issue was also opened upstream: https://github.com/iovisor/bcc/issues/4896

Bug: b/308826679
Context: https://github.com/llvm/llvm-project/pull/75088
Change-Id: Ia8650571dc8ba85238333f18c8e0f9b08e40f8f9
-rw-r--r--libbpf-tools/biolatency.bpf.c9
-rw-r--r--libbpf-tools/biostacks.bpf.c8
-rw-r--r--libbpf-tools/fsdist.bpf.c8
-rw-r--r--libbpf-tools/offcputime.bpf.c8
-rw-r--r--libbpf-tools/runqlat.bpf.c8
5 files changed, 26 insertions, 15 deletions
diff --git a/libbpf-tools/biolatency.bpf.c b/libbpf-tools/biolatency.bpf.c
index 429412db..d0409fd0 100644
--- a/libbpf-tools/biolatency.bpf.c
+++ b/libbpf-tools/biolatency.bpf.c
@@ -101,6 +101,7 @@ static int handle_block_rq_complete(struct request *rq, int error, unsigned int
struct hist_key hkey = {};
struct hist *histp;
s64 delta;
+ u64 udelta;
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
return 0;
@@ -113,6 +114,8 @@ static int handle_block_rq_complete(struct request *rq, int error, unsigned int
if (delta < 0)
goto cleanup;
+ udelta = (u64)delta;
+
if (targ_per_disk) {
struct gendisk *disk = get_disk(rq);
@@ -131,10 +134,10 @@ static int handle_block_rq_complete(struct request *rq, int error, unsigned int
}
if (targ_ms)
- delta /= 1000000U;
+ udelta /= 1000000U;
else
- delta /= 1000U;
- slot = log2l(delta);
+ udelta /= 1000U;
+ slot = log2l(udelta);
if (slot >= MAX_SLOTS)
slot = MAX_SLOTS - 1;
__sync_fetch_and_add(&histp->slots[slot], 1);
diff --git a/libbpf-tools/biostacks.bpf.c b/libbpf-tools/biostacks.bpf.c
index 0ca69880..c1dce671 100644
--- a/libbpf-tools/biostacks.bpf.c
+++ b/libbpf-tools/biostacks.bpf.c
@@ -74,6 +74,7 @@ int trace_done(void *ctx, struct request *rq)
struct internal_rqinfo *i_rqinfop;
struct hist *histp;
s64 delta;
+ u64 udelta;
i_rqinfop = bpf_map_lookup_elem(&rqinfos, &rq);
if (!i_rqinfop)
@@ -81,14 +82,15 @@ int trace_done(void *ctx, struct request *rq)
delta = (s64)(ts - i_rqinfop->start_ts);
if (delta < 0)
goto cleanup;
+ udelta = (u64)delta;
histp = bpf_map_lookup_or_try_init(&hists, &i_rqinfop->rqinfo, &zero);
if (!histp)
goto cleanup;
if (targ_ms)
- delta /= 1000000U;
+ udelta /= 1000000U;
else
- delta /= 1000U;
- slot = log2l(delta);
+ udelta /= 1000U;
+ slot = log2l(udelta);
if (slot >= MAX_SLOTS)
slot = MAX_SLOTS - 1;
__sync_fetch_and_add(&histp->slots[slot], 1);
diff --git a/libbpf-tools/fsdist.bpf.c b/libbpf-tools/fsdist.bpf.c
index 1bf98e91..19bc51a3 100644
--- a/libbpf-tools/fsdist.bpf.c
+++ b/libbpf-tools/fsdist.bpf.c
@@ -41,6 +41,7 @@ static int probe_return(enum fs_file_op op)
__u64 ts = bpf_ktime_get_ns();
__u64 *tsp, slot;
__s64 delta;
+ __u64 udelta;
tsp = bpf_map_lookup_elem(&starts, &tid);
if (!tsp)
@@ -53,12 +54,13 @@ static int probe_return(enum fs_file_op op)
if (delta < 0)
goto cleanup;
+ udelta = (__u64)delta;
if (in_ms)
- delta /= 1000000;
+ udelta /= 1000000;
else
- delta /= 1000;
+ udelta /= 1000;
- slot = log2l(delta);
+ slot = log2l(udelta);
if (slot >= MAX_SLOTS)
slot = MAX_SLOTS - 1;
__sync_fetch_and_add(&hists[op].slots[slot], 1);
diff --git a/libbpf-tools/offcputime.bpf.c b/libbpf-tools/offcputime.bpf.c
index cb20d501..3a36fa32 100644
--- a/libbpf-tools/offcputime.bpf.c
+++ b/libbpf-tools/offcputime.bpf.c
@@ -63,6 +63,7 @@ int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_s
struct internal_key *i_keyp, i_key;
struct val_t *valp, val;
s64 delta;
+ u64 udelta;
u32 pid;
if (allow_record(prev)) {
@@ -94,13 +95,14 @@ int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_s
delta = (s64)(bpf_ktime_get_ns() - i_keyp->start_ts);
if (delta < 0)
goto cleanup;
- delta /= 1000U;
- if (delta < min_block_ns || delta > max_block_ns)
+ udelta = (u64)delta;
+ udelta /= 1000U;
+ if (udelta < min_block_ns || udelta > max_block_ns)
goto cleanup;
valp = bpf_map_lookup_elem(&info, &i_keyp->key);
if (!valp)
goto cleanup;
- __sync_fetch_and_add(&valp->delta, delta);
+ __sync_fetch_and_add(&valp->delta, udelta);
cleanup:
bpf_map_delete_elem(&start, &pid);
diff --git a/libbpf-tools/runqlat.bpf.c b/libbpf-tools/runqlat.bpf.c
index 76e0553e..1ddf976e 100644
--- a/libbpf-tools/runqlat.bpf.c
+++ b/libbpf-tools/runqlat.bpf.c
@@ -80,6 +80,7 @@ static int handle_switch(bool preempt, struct task_struct *prev, struct task_str
u64 *tsp, slot;
u32 pid, hkey;
s64 delta;
+ u64 udelta;
if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
return 0;
@@ -95,6 +96,7 @@ static int handle_switch(bool preempt, struct task_struct *prev, struct task_str
delta = bpf_ktime_get_ns() - *tsp;
if (delta < 0)
goto cleanup;
+ udelta = (u64)delta;
if (targ_per_process)
hkey = BPF_CORE_READ(next, tgid);
@@ -111,10 +113,10 @@ static int handle_switch(bool preempt, struct task_struct *prev, struct task_str
bpf_probe_read_kernel_str(&histp->comm, sizeof(histp->comm),
next->comm);
if (targ_ms)
- delta /= 1000000U;
+ udelta /= 1000000U;
else
- delta /= 1000U;
- slot = log2l(delta);
+ udelta /= 1000U;
+ slot = log2l(udelta);
if (slot >= MAX_SLOTS)
slot = MAX_SLOTS - 1;
__sync_fetch_and_add(&histp->slots[slot], 1);