Diffstat (limited to 'mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h')
-rw-r--r--  mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h  129
1 file changed, 59 insertions, 70 deletions
diff --git a/mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h b/mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h
index 0593c30..e30aedb 100644
--- a/mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h
+++ b/mali_kbase/debug/backend/mali_kbase_debug_ktrace_csf.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2020-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -38,10 +38,9 @@
* PRIVATE: do not use directly. Use KBASE_KTRACE_ADD_CSF() instead.
*/
-void kbasep_ktrace_add_csf(struct kbase_device *kbdev,
- enum kbase_ktrace_code code, struct kbase_queue_group *group,
- struct kbase_queue *queue, kbase_ktrace_flag_t flags,
- u64 info_val);
+void kbasep_ktrace_add_csf(struct kbase_device *kbdev, enum kbase_ktrace_code code,
+ struct kbase_queue_group *group, struct kbase_queue *queue,
+ kbase_ktrace_flag_t flags, u64 info_val);
/**
* kbasep_ktrace_add_csf_kcpu - internal function to add trace about the CSF
@@ -58,39 +57,36 @@ void kbasep_ktrace_add_csf(struct kbase_device *kbdev,
*
* PRIVATE: do not use directly. Use KBASE_KTRACE_ADD_CSF_KCPU() instead.
*/
-void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
- enum kbase_ktrace_code code,
- struct kbase_kcpu_command_queue *queue,
- u64 info_val1, u64 info_val2);
+void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev, enum kbase_ktrace_code code,
+ struct kbase_kcpu_command_queue *queue, u64 info_val1,
+ u64 info_val2);
#define KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, group, queue, flags, info_val) \
- kbasep_ktrace_add_csf(kbdev, KBASE_KTRACE_CODE(code), group, queue, \
- flags, info_val)
+ kbasep_ktrace_add_csf(kbdev, KBASE_KTRACE_CODE(code), group, queue, flags, info_val)
-#define KBASE_KTRACE_RBUF_ADD_CSF_KCPU(kbdev, code, queue, info_val1, \
- info_val2) kbasep_ktrace_add_csf_kcpu(kbdev, KBASE_KTRACE_CODE(code), \
- queue, info_val1, info_val2)
+#define KBASE_KTRACE_RBUF_ADD_CSF_KCPU(kbdev, code, queue, info_val1, info_val2) \
+ kbasep_ktrace_add_csf_kcpu(kbdev, KBASE_KTRACE_CODE(code), queue, info_val1, info_val2)
#else /* KBASE_KTRACE_TARGET_RBUF */
#define KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, group, queue, flags, info_val) \
- do {\
- CSTD_UNUSED(kbdev);\
- CSTD_NOP(code);\
- CSTD_UNUSED(group);\
- CSTD_UNUSED(queue);\
- CSTD_UNUSED(flags);\
- CSTD_UNUSED(info_val);\
- CSTD_NOP(0);\
+ do { \
+ CSTD_UNUSED(kbdev); \
+ CSTD_NOP(code); \
+ CSTD_UNUSED(group); \
+ CSTD_UNUSED(queue); \
+ CSTD_UNUSED(flags); \
+ CSTD_UNUSED(info_val); \
+ CSTD_NOP(0); \
} while (0)
#define KBASE_KTRACE_RBUF_ADD_CSF_KCPU(kbdev, code, queue, info_val1, info_val2) \
- do {\
- CSTD_UNUSED(kbdev);\
- CSTD_NOP(code);\
- CSTD_UNUSED(queue);\
- CSTD_UNUSED(info_val1);\
- CSTD_UNUSED(info_val2);\
+ do { \
+ CSTD_UNUSED(kbdev); \
+ CSTD_NOP(code); \
+ CSTD_UNUSED(queue); \
+ CSTD_UNUSED(info_val1); \
+ CSTD_UNUSED(info_val2); \
} while (0)
#endif /* KBASE_KTRACE_TARGET_RBUF */
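[Note] When KBASE_KTRACE_TARGET_RBUF is disabled, the #else branch above replaces the real trace call with a do { ... } while (0) stub that still references every argument. The sketch below shows that pattern in isolation; the CSTD_UNUSED/CSTD_NOP definitions here are hypothetical stand-ins for the kbase helpers, used only to illustrate why the stubs are written this way.

/* Hypothetical stand-ins for the kbase helpers; illustration only. */
#define CSTD_UNUSED(x) ((void)(x)) /* reference a value without using it */
#define CSTD_NOP(...) ((void)0)    /* discard its arguments at preprocessing time */

/* A compiled-out trace macro following the same stub pattern. */
#define EXAMPLE_TRACE_ADD(kbdev, code, info_val) \
	do { \
		CSTD_UNUSED(kbdev); \
		CSTD_NOP(code); /* 'code' need not even be a defined symbol */ \
		CSTD_UNUSED(info_val); \
	} while (0)

int example_call_site(int *kbdev)
{
	int ret = 42;

	/* The do/while (0) body is a single statement, so the macro is safe
	 * in an unbraced if/else, and every argument is still "consumed",
	 * avoiding unused-variable warnings when tracing is compiled out.
	 */
	if (kbdev)
		EXAMPLE_TRACE_ADD(kbdev, SOME_TRACE_CODE, ret);

	return ret;
}

Because the stub stays a single statement and consumes its arguments, call sites compile identically, and warning-free, whether or not ring-buffer tracing is built in.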
@@ -112,21 +108,21 @@ void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
#else /* KBASE_KTRACE_TARGET_FTRACE */
#define KBASE_KTRACE_FTRACE_ADD_CSF(kbdev, code, group, queue, info_val) \
- do {\
- CSTD_UNUSED(kbdev);\
- CSTD_NOP(code);\
- CSTD_UNUSED(group);\
- CSTD_UNUSED(queue);\
- CSTD_UNUSED(info_val);\
- CSTD_NOP(0);\
+ do { \
+ CSTD_UNUSED(kbdev); \
+ CSTD_NOP(code); \
+ CSTD_UNUSED(group); \
+ CSTD_UNUSED(queue); \
+ CSTD_UNUSED(info_val); \
+ CSTD_NOP(0); \
} while (0)
#define KBASE_KTRACE_FTRACE_ADD_KCPU(code, queue, info_val1, info_val2) \
- do {\
- CSTD_NOP(code);\
- CSTD_UNUSED(queue);\
- CSTD_UNUSED(info_val1);\
- CSTD_UNUSED(info_val2);\
+ do { \
+ CSTD_NOP(code); \
+ CSTD_UNUSED(queue); \
+ CSTD_UNUSED(info_val1); \
+ CSTD_UNUSED(info_val2); \
} while (0)
#endif /* KBASE_KTRACE_TARGET_FTRACE */
@@ -149,15 +145,13 @@ void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
* a) be static or static inline, and
* b) just return 0 and have no other statements present in the body.
*/
-#define KBASE_KTRACE_ADD_CSF_GRP(kbdev, code, group, info_val) \
- do { \
- /* capture values that could come from non-pure fn calls */ \
- struct kbase_queue_group *__group = group; \
- u64 __info_val = info_val; \
- KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, __group, NULL, 0u, \
- __info_val); \
- KBASE_KTRACE_FTRACE_ADD_CSF(kbdev, code, __group, NULL, \
- __info_val); \
+#define KBASE_KTRACE_ADD_CSF_GRP(kbdev, code, group, info_val) \
+ do { \
+ /* capture values that could come from non-pure fn calls */ \
+ struct kbase_queue_group *__group = group; \
+ u64 __info_val = info_val; \
+ KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, __group, NULL, 0u, __info_val); \
+ KBASE_KTRACE_FTRACE_ADD_CSF(kbdev, code, __group, NULL, __info_val); \
} while (0)
/**
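[Note] The local __group/__info_val copies in KBASE_KTRACE_ADD_CSF_GRP() above exist because the arguments are handed to two backends (the ring buffer and ftrace); without the copies, an argument built from a non-pure function call would be evaluated once per backend. The user-space sketch below, with made-up helper and macro names, shows the hazard and the fix.

#include <stdio.h>

static int call_count;

/* A non-pure helper: calling it twice gives a different observable result. */
static int next_event_id(void)
{
	return ++call_count;
}

/* BAD: the argument expression is expanded (and evaluated) once per backend. */
#define TRACE_TWICE_BAD(info_val) \
	do { \
		printf("rbuf:   %d\n", (info_val)); \
		printf("ftrace: %d\n", (info_val)); \
	} while (0)

/* GOOD: capture once, then hand the same value to both backends,
 * mirroring the KBASE_KTRACE_ADD_CSF_GRP() pattern above.
 */
#define TRACE_TWICE_GOOD(info_val) \
	do { \
		int __info_val = (info_val); \
		printf("rbuf:   %d\n", __info_val); \
		printf("ftrace: %d\n", __info_val); \
	} while (0)

int main(void)
{
	TRACE_TWICE_BAD(next_event_id());  /* prints 1 then 2: evaluated twice */
	TRACE_TWICE_GOOD(next_event_id()); /* prints 3 twice: evaluated once   */
	return 0;
}

This is also why the kernel-doc above asks that any function used in the macro arguments be static or static inline and simply return 0: anything with side effects would otherwise make adding a trace point change program behaviour.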
@@ -175,29 +169,24 @@ void kbasep_ktrace_add_csf_kcpu(struct kbase_device *kbdev,
* a) be static or static inline, and
* b) just return 0 and have no other statements present in the body.
*/
-#define KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, code, group, queue, info_val) \
- do { \
- /* capture values that could come from non-pure fn calls */ \
- struct kbase_queue_group *__group = group; \
- struct kbase_queue *__queue = queue; \
- u64 __info_val = info_val; \
- KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, __group, __queue, 0u, \
- __info_val); \
- KBASE_KTRACE_FTRACE_ADD_CSF(kbdev, code, __group, \
- __queue, __info_val); \
+#define KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, code, group, queue, info_val) \
+ do { \
+ /* capture values that could come from non-pure fn calls */ \
+ struct kbase_queue_group *__group = group; \
+ struct kbase_queue *__queue = queue; \
+ u64 __info_val = info_val; \
+ KBASE_KTRACE_RBUF_ADD_CSF(kbdev, code, __group, __queue, 0u, __info_val); \
+ KBASE_KTRACE_FTRACE_ADD_CSF(kbdev, code, __group, __queue, __info_val); \
} while (0)
-
-#define KBASE_KTRACE_ADD_CSF_KCPU(kbdev, code, queue, info_val1, info_val2) \
- do { \
- /* capture values that could come from non-pure fn calls */ \
- struct kbase_kcpu_command_queue *__queue = queue; \
- u64 __info_val1 = info_val1; \
- u64 __info_val2 = info_val2; \
- KBASE_KTRACE_RBUF_ADD_CSF_KCPU(kbdev, code, __queue, \
- __info_val1, __info_val2); \
- KBASE_KTRACE_FTRACE_ADD_KCPU(code, __queue, \
- __info_val1, __info_val2); \
+#define KBASE_KTRACE_ADD_CSF_KCPU(kbdev, code, queue, info_val1, info_val2) \
+ do { \
+ /* capture values that could come from non-pure fn calls */ \
+ struct kbase_kcpu_command_queue *__queue = queue; \
+ u64 __info_val1 = info_val1; \
+ u64 __info_val2 = info_val2; \
+ KBASE_KTRACE_RBUF_ADD_CSF_KCPU(kbdev, code, __queue, __info_val1, __info_val2); \
+ KBASE_KTRACE_FTRACE_ADD_KCPU(code, __queue, __info_val1, __info_val2); \
} while (0)
#endif /* _KBASE_DEBUG_KTRACE_CSF_H_ */
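[Note] For completeness, a hedged sketch of a call site follows. It assumes the kbase ktrace headers are already included; the trace code names (GROUP_EVT_EXAMPLE and friends) and the surrounding function are made up for illustration and would have to be replaced by real codes from the ktrace code tables, so this is a shape-of-the-API sketch rather than compilable driver code. As the PRIVATE notes above say, callers go through these macros and never call the kbasep_ktrace_add_csf*() functions directly.

/* Hypothetical call site; trace code names are placeholders. */
static void example_trace_points(struct kbase_device *kbdev,
				 struct kbase_queue_group *group,
				 struct kbase_queue *queue,
				 struct kbase_kcpu_command_queue *kcpu_queue,
				 u64 info)
{
	/* Group-only event: the macro passes NULL for the queue internally. */
	KBASE_KTRACE_ADD_CSF_GRP(kbdev, GROUP_EVT_EXAMPLE, group, info);

	/* Group + queue event. */
	KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, QUEUE_EVT_EXAMPLE, group, queue, info);

	/* KCPU command queue event carrying two 64-bit payload values. */
	KBASE_KTRACE_ADD_CSF_KCPU(kbdev, KCPU_EVT_EXAMPLE, kcpu_queue, info, 0u);
}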