author    Vamsidhar reddy Gaddam <gvamsi@google.com>    2023-12-20 12:42:26 +0000
committer Vamsidhar reddy Gaddam <gvamsi@google.com>    2024-01-05 09:19:17 +0000
commit    11473542814286e59a89a70c969fb50a25ba921f (patch)
tree      bd4aa60e7d3dc895d82a36fcea0026569e3a04aa /mali_pixel
parent    8768eedce66a4373c96f35c8dfb73d4668703180 (diff)
parent    049a542207ed694271316782397b78b2e202086a (diff)
Merge branch 'upstream' into HEAD
Update KMD to R47P0

Bug: 315267052
Test: Outlined in go/pixel-gpu-kmd-r47p0
Change-Id: I89454c4c862033fe330b260a9bc6cc777a3ca231
Signed-off-by: Vamsidhar reddy Gaddam <gvamsi@google.com>
Diffstat (limited to 'mali_pixel')
-rw-r--r--  mali_pixel/memory_group_manager.c  45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c
index 0cde4e0..b767811 100644
--- a/mali_pixel/memory_group_manager.c
+++ b/mali_pixel/memory_group_manager.c
@@ -56,8 +56,7 @@
#define MGM_SENTINEL_PT_SIZE U64_MAX
#define INVALID_GROUP_ID(group_id) \
- (WARN_ON((group_id) < 0) || \
- WARN_ON((group_id) >= MEMORY_GROUP_MANAGER_NR_GROUPS))
+ WARN_ON((group_id) >= MEMORY_GROUP_MANAGER_NR_GROUPS)
#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
@@ -162,35 +161,35 @@ struct mgm_groups {
static int mgm_debugfs_state_get(void *data, u64 *val)
{
struct mgm_group *group = data;
- *val = (int)group->state;
+ *val = (u64)group->state;
return 0;
}
static int mgm_debugfs_size_get(void *data, u64 *val)
{
struct mgm_group *group = data;
- *val = atomic_read(&group->size);
+ *val = (u64)atomic_read(&group->size);
return 0;
}
static int mgm_debugfs_lp_size_get(void *data, u64 *val)
{
struct mgm_group *group = data;
- *val = atomic_read(&group->lp_size);
+ *val = (u64)atomic_read(&group->lp_size);
return 0;
}
static int mgm_debugfs_insert_pfn_get(void *data, u64 *val)
{
struct mgm_group *group = data;
- *val = atomic_read(&group->insert_pfn);
+ *val = (u64)atomic_read(&group->insert_pfn);
return 0;
}
static int mgm_debugfs_update_gpu_pte_get(void *data, u64 *val)
{
struct mgm_group *group = data;
- *val = atomic_read(&group->update_gpu_pte);
+ *val = (u64)atomic_read(&group->update_gpu_pte);
return 0;
}
@@ -409,7 +408,7 @@ static int group_active_pt_id(struct mgm_groups *data, enum pixel_mgm_group_id g
static atomic64_t total_gpu_pages = ATOMIC64_INIT(0);
-static atomic_t* get_size_counter(struct memory_group_manager_device* mgm_dev, int group_id, int order)
+static atomic_t* get_size_counter(struct memory_group_manager_device* mgm_dev, unsigned int group_id, unsigned int order)
{
static atomic_t err_atomic;
struct mgm_groups *data = mgm_dev->data;
@@ -420,13 +419,13 @@ static atomic_t* get_size_counter(struct memory_group_manager_device* mgm_dev, i
case ORDER_LARGE_PAGE:
return &data->groups[group_id].lp_size;
default:
- dev_err(data->dev, "Unknown order(%d)\n", order);
+ dev_err(data->dev, "Unknown order(%u)\n", order);
return &err_atomic;
}
}
-static void update_size(struct memory_group_manager_device *mgm_dev, int
- group_id, int order, bool alloc)
+static void update_size(struct memory_group_manager_device *mgm_dev, unsigned int
+ group_id, unsigned int order, bool alloc)
{
static DEFINE_RATELIMIT_STATE(gpu_alloc_rs, 10*HZ, 1);
atomic_t* size = get_size_counter(mgm_dev, group_id, order);
@@ -643,14 +642,14 @@ done:
EXPORT_SYMBOL(pixel_mgm_resize_group_to_fit);
static struct page *mgm_alloc_page(
- struct memory_group_manager_device *mgm_dev, int group_id,
+ struct memory_group_manager_device *mgm_dev, unsigned int group_id,
gfp_t gfp_mask, unsigned int order)
{
struct mgm_groups *const data = mgm_dev->data;
struct page *p;
dev_dbg(data->dev,
- "%s(mgm_dev=%p, group_id=%d gfp_mask=0x%x order=%u\n",
+ "%s(mgm_dev=%p, group_id=%u gfp_mask=0x%x order=%u\n",
__func__, (void *)mgm_dev, group_id, gfp_mask, order);
if (INVALID_GROUP_ID(group_id))
@@ -680,7 +679,7 @@ static struct page *mgm_alloc_page(
/* Everything should already be set up*/
break;
default:
- dev_err(data->dev, "Group %d in invalid state %d\n",
+ dev_err(data->dev, "Group %u in invalid state %d\n",
group_id, data->groups[group_id].state);
}
}
@@ -698,12 +697,12 @@ static struct page *mgm_alloc_page(
}
static void mgm_free_page(
- struct memory_group_manager_device *mgm_dev, int group_id,
+ struct memory_group_manager_device *mgm_dev, unsigned int group_id,
struct page *page, unsigned int order)
{
struct mgm_groups *const data = mgm_dev->data;
- dev_dbg(data->dev, "%s(mgm_dev=%p, group_id=%d page=%p order=%u\n",
+ dev_dbg(data->dev, "%s(mgm_dev=%p, group_id=%u page=%p order=%u\n",
__func__, (void *)mgm_dev, group_id, (void *)page, order);
if (INVALID_GROUP_ID(group_id))
@@ -739,14 +738,14 @@ static int mgm_get_import_memory_id(
}
static u64 mgm_update_gpu_pte(
- struct memory_group_manager_device *const mgm_dev, int const group_id,
+ struct memory_group_manager_device *const mgm_dev, unsigned int const group_id,
int const mmu_level, u64 pte)
{
struct mgm_groups *const data = mgm_dev->data;
unsigned int pbha;
dev_dbg(data->dev,
- "%s(mgm_dev=%p, group_id=%d, mmu_level=%d, pte=0x%llx)\n",
+ "%s(mgm_dev=%p, group_id=%u, mmu_level=%d, pte=0x%llx)\n",
__func__, (void *)mgm_dev, group_id, mmu_level, pte);
if (INVALID_GROUP_ID(group_id))
@@ -775,7 +774,7 @@ static u64 mgm_update_gpu_pte(
pte |= ((u64)pbha & PBHA_BIT_MASK) << PBHA_BIT_POS;
dev_dbg(data->dev,
- "%s: group_id=%d pbha=%d "
+ "%s: group_id=%u pbha=%d "
"pte=0x%llx -> 0x%llx\n",
__func__, group_id, pbha, old_pte, pte);
@@ -793,7 +792,7 @@ static u64 mgm_update_gpu_pte(
return pte;
}
-static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev, int group_id,
+static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev, unsigned int group_id,
int mmu_level, u64 pte)
{
struct mgm_groups *const data = mgm_dev->data;
@@ -812,7 +811,7 @@ static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev,
/* All other groups will have PBHA bits, so clear them */
old_pte = pte;
pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
- dev_dbg(data->dev, "%s: group_id=%d pte=0x%llx -> 0x%llx\n", __func__, group_id,
+ dev_dbg(data->dev, "%s: group_id=%u pte=0x%llx -> 0x%llx\n", __func__, group_id,
old_pte, pte);
}
@@ -820,7 +819,7 @@ static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev,
}
static vm_fault_t mgm_vmf_insert_pfn_prot(
- struct memory_group_manager_device *const mgm_dev, int const group_id,
+ struct memory_group_manager_device *const mgm_dev, unsigned int const group_id,
struct vm_area_struct *const vma, unsigned long const addr,
unsigned long const pfn, pgprot_t const prot)
{
@@ -828,7 +827,7 @@ static vm_fault_t mgm_vmf_insert_pfn_prot(
vm_fault_t fault;
dev_dbg(data->dev,
- "%s(mgm_dev=%p, group_id=%d, vma=%p, addr=0x%lx, pfn=0x%lx,"
+ "%s(mgm_dev=%p, group_id=%u, vma=%p, addr=0x%lx, pfn=0x%lx,"
" prot=0x%llx)\n",
__func__, (void *)mgm_dev, group_id, (void *)vma, addr, pfn,
pgprot_val(prot));
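
For illustration only, a minimal, self-contained C sketch of the unsigned group-ID validation pattern shown in the diff above. The value of MEMORY_GROUP_MANAGER_NR_GROUPS is assumed, WARN_ON() is replaced by a userspace stand-in, and group_id_is_valid() is a hypothetical helper; none of this is part of the driver. With group_id declared unsigned, a "(group_id) < 0" test is always false, so INVALID_GROUP_ID() only needs the upper-bound check.

/*
 * Hypothetical, self-contained sketch (not driver code): with an unsigned
 * group_id, only the upper-bound check in INVALID_GROUP_ID() is meaningful.
 */
#include <stdbool.h>
#include <stdio.h>

#define MEMORY_GROUP_MANAGER_NR_GROUPS 16   /* assumed value for this sketch */

/* Userspace stand-in for the kernel's WARN_ON(): warn and return the condition. */
#define WARN_ON(cond) \
	({ bool __c = (cond); if (__c) fprintf(stderr, "WARN_ON(%s)\n", #cond); __c; })

#define INVALID_GROUP_ID(group_id) \
	WARN_ON((group_id) >= MEMORY_GROUP_MANAGER_NR_GROUPS)

/* Hypothetical caller mirroring the early-out pattern used in mgm_alloc_page(). */
static bool group_id_is_valid(unsigned int group_id)
{
	return !INVALID_GROUP_ID(group_id);
}

int main(void)
{
	printf("group 3:  %s\n", group_id_is_valid(3)  ? "valid" : "invalid");
	printf("group 16: %s\n", group_id_is_valid(16) ? "valid" : "invalid");
	return 0;
}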