summaryrefslogtreecommitdiff
path: root/mali_kbase/mali_kbase_trace_gpu_mem.h
blob: 96d0c40530fc7eb3115d8f007658b9be6eaf8cf5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_TRACE_GPU_MEM_H_
#define _KBASE_TRACE_GPU_MEM_H_

#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
#include <trace/events/gpu_mem.h>
#endif

#define DEVICE_TGID ((u32)0U)

/**
 * kbase_trace_gpu_mem_usage - Emit gpu_mem_total tracepoints for the current
 *                             device-wide and per-process GPU memory usage.
 *
 * @kbdev: Pointer to the kbase device.
 * @kctx:  Pointer to the kbase context; may be NULL, in which case only the
 *         device-wide total is reported.
 *
 * The caller must hold kbdev->gpu_mem_usage_lock so that the page counters
 * read here are consistent with each other. Compiles to a no-op when
 * CONFIG_TRACE_GPU_MEM is disabled.
 *
 * Note: must be 'static inline' (not plain 'static') because it is defined
 * in a header — otherwise every translation unit that includes this header
 * without calling it emits an unused-function warning, and each caller gets
 * its own out-of-line copy. This also matches the sibling helpers below.
 */
static inline void kbase_trace_gpu_mem_usage(struct kbase_device *kbdev, struct kbase_context *kctx)
{
#if IS_ENABLED(CONFIG_TRACE_GPU_MEM)
	lockdep_assert_held(&kbdev->gpu_mem_usage_lock);

	/* Device-wide usage is reported against the pseudo TGID 0. */
	trace_gpu_mem_total(kbdev->id, DEVICE_TGID, kbdev->total_gpu_pages << PAGE_SHIFT);

	if (likely(kctx))
		trace_gpu_mem_total(kbdev->id, kctx->kprcs->tgid,
				    kctx->kprcs->total_gpu_pages << PAGE_SHIFT);
#else
	CSTD_UNUSED(kbdev);
	CSTD_UNUSED(kctx);
#endif
}

/**
 * kbase_trace_gpu_mem_usage_dec - Subtract @pages from the GPU memory usage
 *                                 counters and emit updated tracepoints.
 *
 * @kbdev: Pointer to the kbase device.
 * @kctx:  Pointer to the kbase context; may be NULL, in which case only the
 *         device-wide counter is updated.
 * @pages: Number of GPU pages to subtract.
 *
 * Takes kbdev->gpu_mem_usage_lock around the counter updates and the
 * tracepoint emission so readers always observe a consistent pair.
 */
static inline void kbase_trace_gpu_mem_usage_dec(struct kbase_device *kbdev,
						 struct kbase_context *kctx, size_t pages)
{
	spin_lock(&kbdev->gpu_mem_usage_lock);

	/* Device-wide counter first; the per-process one only if a context
	 * was supplied. The two updates are independent of each other.
	 */
	kbdev->total_gpu_pages -= pages;

	if (likely(kctx))
		kctx->kprcs->total_gpu_pages -= pages;

	kbase_trace_gpu_mem_usage(kbdev, kctx);

	spin_unlock(&kbdev->gpu_mem_usage_lock);
}

/**
 * kbase_trace_gpu_mem_usage_inc - Add @pages to the GPU memory usage
 *                                 counters and emit updated tracepoints.
 *
 * @kbdev: Pointer to the kbase device.
 * @kctx:  Pointer to the kbase context; may be NULL, in which case only the
 *         device-wide counter is updated.
 * @pages: Number of GPU pages to add.
 *
 * Takes kbdev->gpu_mem_usage_lock around the counter updates and the
 * tracepoint emission so readers always observe a consistent pair.
 */
static inline void kbase_trace_gpu_mem_usage_inc(struct kbase_device *kbdev,
						 struct kbase_context *kctx, size_t pages)
{
	spin_lock(&kbdev->gpu_mem_usage_lock);

	/* Device-wide counter first; the per-process one only if a context
	 * was supplied. The two updates are independent of each other.
	 */
	kbdev->total_gpu_pages += pages;

	if (likely(kctx))
		kctx->kprcs->total_gpu_pages += pages;

	kbase_trace_gpu_mem_usage(kbdev, kctx);

	spin_unlock(&kbdev->gpu_mem_usage_lock);
}

/**
 * kbase_remove_dma_buf_usage - Remove a dma-buf entry captured.
 *
 * @kctx: Pointer to the kbase context
 * @alloc: Pointer to the alloc to unmap
 *
 * Remove the reference to the dma-buf being unmapped from the kbase_device
 * level rb_tree and the kbase_process level dma-buf rb_tree.
 */
void kbase_remove_dma_buf_usage(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_add_dma_buf_usage - Add a dma-buf entry captured.
 *
 * @kctx: Pointer to the kbase context
 * @alloc: Pointer to the alloc to map in
 *
 * Add a reference to the dma-buf being mapped to the kbase_device level
 * rb_tree and the kbase_process level dma-buf rb_tree.
 */
void kbase_add_dma_buf_usage(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc);

#endif /* _KBASE_TRACE_GPU_MEM_H_ */