summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJack Diver <diverj@google.com>2024-03-05 04:30:13 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2024-03-05 04:30:13 +0000
commit17105fe8e345cb98fec95e78491e7584391e795e (patch)
treee5713a696500e6d4892f1f9a06d3709badb23326
parent847c8f0095d1e1d0ba1ccf22ef2f0bd2377afb39 (diff)
parent000dfb19f80bcea36b6eba00d76d64ab43ecf83c (diff)
downloadgpu-17105fe8e345cb98fec95e78491e7584391e795e.tar.gz
mali_pixel: Refactor SLC partition management am: 000dfb19f8
Original change: https://partner-android-review.googlesource.com/c/kernel/private/google-modules/gpu/+/2753592 Change-Id: I2458e1d98cdadfc9158bd182a8f0b4090a535d26 Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--common/include/uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h45
-rw-r--r--mali_pixel/Kbuild1
-rw-r--r--mali_pixel/memory_group_manager.c477
-rw-r--r--mali_pixel/pixel_slc.c215
-rw-r--r--mali_pixel/pixel_slc.h72
5 files changed, 324 insertions, 486 deletions
diff --git a/common/include/uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h b/common/include/uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h
index b575c79..893cdca 100644
--- a/common/include/uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h
+++ b/common/include/uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h
@@ -7,49 +7,4 @@
#ifndef _UAPI_PIXEL_MEMORY_GROUP_MANAGER_H_
#define _UAPI_PIXEL_MEMORY_GROUP_MANAGER_H_
-/**
- * enum pixel_mgm_group_id - Symbolic names for used memory groups
- */
-enum pixel_mgm_group_id
-{
- /* The Mali driver requires that allocations made on one of the groups
- * are not treated specially.
- */
- MGM_RESERVED_GROUP_ID = 0,
-
- /* Group for memory that should be cached in the system level cache. */
- MGM_SLC_GROUP_ID = 1,
-
- /* Group for memory explicitly allocated in SLC. */
- MGM_SLC_EXPLICIT_GROUP_ID = 2,
-
- /* Imported memory is handled by the allocator of the memory, and the Mali
- * DDK will request a group_id for such memory via mgm_get_import_memory_id().
- * We specify which group we want to use for this here.
- */
- MGM_IMPORTED_MEMORY_GROUP_ID = (MEMORY_GROUP_MANAGER_NR_GROUPS - 1),
-};
-
-/**
- * pixel_mgm_query_group_size - Query the current size of a memory group
- *
- * @mgm_dev: The memory group manager through which the request is being made.
- * @group_id: Memory group to query.
- *
- * Returns the actual size of the memory group's active partition
- */
-extern u64 pixel_mgm_query_group_size(struct memory_group_manager_device* mgm_dev,
- enum pixel_mgm_group_id group_id);
-
-/**
- * pixel_mgm_resize_group_to_fit - Resize a memory group to meet @demand, if possible
- *
- * @mgm_dev: The memory group manager through which the request is being made.
- * @group_id: Memory group for which we will change the backing partition.
- * @demand: The demanded space from the memory group.
- */
-extern void pixel_mgm_resize_group_to_fit(struct memory_group_manager_device* mgm_dev,
- enum pixel_mgm_group_id group_id,
- u64 demand);
-
#endif /* _UAPI_PIXEL_MEMORY_GROUP_MANAGER_H_ */
diff --git a/mali_pixel/Kbuild b/mali_pixel/Kbuild
index 4b519a9..4967b34 100644
--- a/mali_pixel/Kbuild
+++ b/mali_pixel/Kbuild
@@ -37,6 +37,7 @@ endif
ifeq ($(CONFIG_MALI_MEMORY_GROUP_MANAGER),m)
DEFINES += -DCONFIG_MALI_MEMORY_GROUP_MANAGER
mali_pixel-objs += memory_group_manager.o
+ mali_pixel-objs += pixel_slc.o
endif
ifeq ($(CONFIG_MALI_PRIORITY_CONTROL_MANAGER),m)
DEFINES += -DCONFIG_MALI_PRIORITY_CONTROL_MANAGER
diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c
index 0c9a241..3015468 100644
--- a/mali_pixel/memory_group_manager.c
+++ b/mali_pixel/memory_group_manager.c
@@ -23,7 +23,7 @@
#include <linux/memory_group_manager.h>
-#include <soc/google/pt.h>
+#include "pixel_slc.h"
#include <uapi/gpu/arm/midgard/platform/pixel/pixel_memory_group_manager.h>
@@ -31,29 +31,30 @@
#define ORDER_SMALL_PAGE 0
#define ORDER_LARGE_PAGE const_ilog2(NUM_PAGES_IN_2MB_LARGE_PAGE)
-/* Borr does not have "real" PBHA support. However, since we only use a 36-bit PA on the bus,
- * AxADDR[39:36] is wired up to the GPU AxUSER[PBHA] field seen by the rest of the system.
- * Those AxADDR bits come from [39:36] in the page descriptor.
- *
- * Odin and Turse have "real" PBHA support using a dedicated output signal and page descriptor field.
- * The AxUSER[PBHA] field is driven by the GPU's PBHA signal, and AxADDR[39:36] is dropped.
- * The page descriptor PBHA field is [62:59].
- *
- * We could write to both of these locations, as each SoC only reads from its respective PBHA
- * location with the other being ignored or dropped.
- *
- * b/148988078 contains confirmation of the above description.
+/**
+ * enum mgm_group_id - Symbolic names for used memory groups
*/
-#if IS_ENABLED(CONFIG_SOC_GS101)
-#define PBHA_BIT_POS (36)
-#else
-#define PBHA_BIT_POS (59)
-#endif
-#define PBHA_BIT_MASK (0xf)
+enum mgm_group_id
+{
+ /**
+ * @MGM_RESERVED_GROUP_ID: The Mali driver requires that allocations made on one of the
+ * groups are not treated specially.
+ */
+ MGM_RESERVED_GROUP_ID = 0,
-#define MGM_PBHA_DEFAULT 0
+ /**
+ * @MGM_SLC_GROUP_ID: Group for memory that should be cached in the system level cache.
+ */
+ MGM_SLC_GROUP_ID = 1,
-#define MGM_SENTINEL_PT_SIZE U64_MAX
+ /**
+ * @MGM_IMPORTED_MEMORY_GROUP_ID: Imported memory is handled by the allocator of the memory,
+ * and the Mali DDK will request a group_id for such memory
+ * via mgm_get_import_memory_id(). We specify which group we
+ * want to use for this here.
+ */
+ MGM_IMPORTED_MEMORY_GROUP_ID = (MEMORY_GROUP_MANAGER_NR_GROUPS - 1),
+};
#define INVALID_GROUP_ID(group_id) \
WARN_ON((group_id) >= MEMORY_GROUP_MANAGER_NR_GROUPS)
@@ -81,13 +82,6 @@ static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
* @lp_size: The number of allocated large(2MB) pages
* @insert_pfn: The number of calls to map pages for CPU access.
* @update_gpu_pte: The number of calls to update GPU page table entries.
- * @ptid: The active partition ID for this group
- * @pbha: The PBHA bits assigned to this group,
- * @base_pt: The base partition ID available to this group.
- * @pt_num: The number of partitions available to this group.
- * @active_pt_idx: The relative index for the partition backing the group.
- * Different from the absolute ptid.
- * @state: The lifecycle state of the partition associated with this group
* This structure allows page allocation information to be displayed via
* debugfs. Display is organized per group with small and large sized pages.
*/
@@ -98,30 +92,6 @@ struct mgm_group {
atomic_t insert_pfn;
atomic_t update_gpu_pte;
#endif
-
- ptid_t ptid;
- ptpbha_t pbha;
-
- u32 base_pt;
- u32 pt_num;
- u32 active_pt_idx;
- enum {
- MGM_GROUP_STATE_NEW = 0,
- MGM_GROUP_STATE_ENABLED = 10,
- MGM_GROUP_STATE_DISABLED_NOT_FREED = 20,
- MGM_GROUP_STATE_DISABLED = 30,
- } state;
-};
-
-/**
- * struct partition_stats - Structure for tracking sizing of a partition
- *
- * @capacity: The total capacity of each partition
- * @size: The current size of each partition
- */
-struct partition_stats {
- u64 capacity;
- atomic64_t size;
};
/**
@@ -130,26 +100,22 @@ struct partition_stats {
* @groups: To keep track of the number of allocated pages of all groups
* @ngroups: Number of groups actually used
* @npartitions: Number of partitions used by all groups combined
- * @pt_stats: The sizing info for each partition
* @dev: device attached
- * @pt_handle: Link to SLC partition data
* @kobj: &sruct kobject used for linking to pixel_stats_sysfs node
* @mgm_debugfs_root: debugfs root directory of memory group manager
+ * @slc_data: To track GPU SLC partitions.
*
* This structure allows page allocation information to be displayed via
* debugfs. Display is organized per group with small and large sized pages.
*/
struct mgm_groups {
struct mgm_group groups[MEMORY_GROUP_MANAGER_NR_GROUPS];
- size_t ngroups;
- size_t npartitions;
- struct partition_stats *pt_stats;
struct device *dev;
- struct pt_handle *pt_handle;
struct kobject kobj;
#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
struct dentry *mgm_debugfs_root;
#endif
+ struct slc_data slc_data;
};
/*
@@ -158,13 +124,6 @@ struct mgm_groups {
#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
-static int mgm_debugfs_state_get(void *data, u64 *val)
-{
- struct mgm_group *group = data;
- *val = (u64)group->state;
- return 0;
-}
-
static int mgm_debugfs_size_get(void *data, u64 *val)
{
struct mgm_group *group = data;
@@ -193,8 +152,6 @@ static int mgm_debugfs_update_gpu_pte_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_state, mgm_debugfs_state_get,
- NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_size, mgm_debugfs_size_get,
NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_lp_size, mgm_debugfs_lp_size_get,
@@ -218,7 +175,6 @@ static struct {
const char *name;
const struct file_operations *fops;
} attribs[] = {
- { "state", &fops_mgm_state},
{ "size", &fops_mgm_size},
{ "lp_size", &fops_mgm_lp_size},
{ "insert_pfn", &fops_mgm_insert_pfn},
@@ -392,20 +348,6 @@ static void mgm_sysfs_term(struct mgm_groups *data)
#endif /* CONFIG_MALI_PIXEL_STATS */
-static int group_pt_id(struct mgm_groups *data, enum pixel_mgm_group_id group_id, int pt_index)
-{
- struct mgm_group *group = &data->groups[group_id];
- if (WARN_ON_ONCE(pt_index >= group->pt_num))
- return 0;
-
- return group->base_pt + pt_index;
-}
-
-static int group_active_pt_id(struct mgm_groups *data, enum pixel_mgm_group_id group_id)
-{
- return group_pt_id(data, group_id, data->groups[group_id].active_pt_idx);
-}
-
static atomic64_t total_gpu_pages = ATOMIC64_INIT(0);
static atomic_t* get_size_counter(struct memory_group_manager_device* mgm_dev, unsigned int group_id, unsigned int order)
@@ -462,185 +404,6 @@ static void update_size(struct memory_group_manager_device *mgm_dev, unsigned in
pr_warn("total_gpu_pages %lld\n", atomic64_read(&total_gpu_pages));
}
-static void pt_size_invalidate(struct mgm_groups* data, int pt_idx)
-{
- /* Set the size to a known sentinel value so that we can later detect an update */
- atomic64_set(&data->pt_stats[pt_idx].size, MGM_SENTINEL_PT_SIZE);
-}
-
-static void pt_size_init(struct mgm_groups* data, int pt_idx, size_t size)
-{
- /* The resize callback may have already been executed, which would have set
- * the correct size. Only update the size if this has not happened.
- * We can tell that no resize took place if the size is still a sentinel.
- */
- atomic64_cmpxchg(&data->pt_stats[pt_idx].size, MGM_SENTINEL_PT_SIZE, size);
-}
-
-static void validate_ptid(struct mgm_groups* data, enum pixel_mgm_group_id group_id, int ptid)
-{
- if (ptid == -EINVAL)
- dev_err(data->dev, "Failed to get partition for group: %d\n", group_id);
- else
- dev_info(data->dev, "pt_client_mutate returned ptid=%d for group=%d", ptid, group_id);
-}
-
-static void update_group(struct mgm_groups* data,
- enum pixel_mgm_group_id group_id,
- int ptid,
- int relative_pt_idx)
-{
- int const abs_pt_idx = group_pt_id(data, group_id, relative_pt_idx);
- int const pbha = pt_pbha(data->dev->of_node, abs_pt_idx);
-
- if (pbha == PT_PBHA_INVALID)
- dev_err(data->dev, "Failed to get PBHA for group: %d\n", group_id);
- else
- dev_info(data->dev, "pt_pbha returned PBHA=%d for group=%d", pbha, group_id);
-
- data->groups[group_id].ptid = ptid;
- data->groups[group_id].pbha = pbha;
- data->groups[group_id].state = MGM_GROUP_STATE_ENABLED;
- data->groups[group_id].active_pt_idx = relative_pt_idx;
-}
-
-static void disable_partition(struct mgm_groups* data, enum pixel_mgm_group_id group_id)
-{
- int const active_idx = group_active_pt_id(data, group_id);
-
- /* Skip if not already enabled */
- if (data->groups[group_id].state != MGM_GROUP_STATE_ENABLED)
- return;
-
- pt_client_disable_no_free(data->pt_handle, active_idx);
- data->groups[group_id].state = MGM_GROUP_STATE_DISABLED_NOT_FREED;
-
- pt_size_invalidate(data, active_idx);
- pt_size_init(data, active_idx, 0);
-}
-
-static void enable_partition(struct mgm_groups* data, enum pixel_mgm_group_id group_id)
-{
- int ptid;
- size_t size = 0;
- int const active_idx = group_active_pt_id(data, group_id);
-
- /* Skip if already enabled */
- if (data->groups[group_id].state == MGM_GROUP_STATE_ENABLED)
- return;
-
- pt_size_invalidate(data, active_idx);
-
- ptid = pt_client_enable_size(data->pt_handle, active_idx, &size);
-
- validate_ptid(data, group_id, ptid);
-
- update_group(data, group_id, ptid, data->groups[group_id].active_pt_idx);
-
- pt_size_init(data, active_idx, size);
-}
-
-static void set_group_partition(struct mgm_groups* data,
- enum pixel_mgm_group_id group_id,
- int new_pt_index)
-{
- int ptid;
- size_t size = 0;
- int const active_idx = group_active_pt_id(data, group_id);
- int const new_idx = group_pt_id(data, group_id, new_pt_index);
-
- /* Early out if no changes are needed */
- if (new_idx == active_idx)
- return;
-
- pt_size_invalidate(data, new_idx);
-
- ptid = pt_client_mutate_size(data->pt_handle, active_idx, new_idx, &size);
-
- validate_ptid(data, group_id, ptid);
-
- update_group(data, group_id, ptid, new_pt_index);
-
- pt_size_init(data, new_idx, size);
- /* Reset old partition size */
- atomic64_set(&data->pt_stats[active_idx].size, data->pt_stats[active_idx].capacity);
-}
-
-u64 pixel_mgm_query_group_size(struct memory_group_manager_device* mgm_dev,
- enum pixel_mgm_group_id group_id)
-{
- struct mgm_groups *data;
- struct mgm_group *group;
- u64 size = 0;
-
- /* Early out if the group doesn't exist */
- if (INVALID_GROUP_ID(group_id))
- goto done;
-
- data = mgm_dev->data;
- group = &data->groups[group_id];
-
- /* Early out if the group has no partitions */
- if (group->pt_num == 0)
- goto done;
-
- size = atomic64_read(&data->pt_stats[group_active_pt_id(data, group_id)].size);
-
-done:
- return size;
-}
-EXPORT_SYMBOL(pixel_mgm_query_group_size);
-
-void pixel_mgm_resize_group_to_fit(struct memory_group_manager_device* mgm_dev,
- enum pixel_mgm_group_id group_id,
- u64 demand)
-{
- struct mgm_groups *data;
- struct mgm_group *group;
- s64 diff, cur_size, min_diff = S64_MAX;
- int pt_idx;
-
- /* Early out if the group doesn't exist */
- if (INVALID_GROUP_ID(group_id))
- goto done;
-
- data = mgm_dev->data;
- group = &data->groups[group_id];
-
- /* Early out if the group has no partitions */
- if (group->pt_num == 0)
- goto done;
-
- /* We can disable the partition if there's no demand */
- if (demand == 0)
- {
- disable_partition(data, group_id);
- goto done;
- }
-
- /* Calculate best partition to use, by finding the nearest capacity */
- for (pt_idx = 0; pt_idx < group->pt_num; ++pt_idx)
- {
- cur_size = data->pt_stats[group_pt_id(data, group_id, pt_idx)].capacity;
- diff = abs(demand - cur_size);
-
- if (diff > min_diff)
- break;
-
- min_diff = diff;
- }
-
- /* Ensure the partition is enabled before trying to mutate it */
- enable_partition(data, group_id);
- set_group_partition(data, group_id, pt_idx - 1);
-
-done:
- dev_dbg(data->dev, "%s: resized memory_group_%d for demand: %lldB", __func__, group_id, demand);
-
- return;
-}
-EXPORT_SYMBOL(pixel_mgm_resize_group_to_fit);
-
static struct page *mgm_alloc_page(
struct memory_group_manager_device *mgm_dev, unsigned int group_id,
gfp_t gfp_mask, unsigned int order)
@@ -655,35 +418,12 @@ static struct page *mgm_alloc_page(
if (INVALID_GROUP_ID(group_id))
return NULL;
- if (WARN_ON_ONCE((group_id != MGM_RESERVED_GROUP_ID) &&
- (group_active_pt_id(data, group_id) >= data->npartitions)))
- return NULL;
-
/* We don't expect to be allocting pages into the group used for
* external or imported memory
*/
if (WARN_ON(group_id == MGM_IMPORTED_MEMORY_GROUP_ID))
return NULL;
- /* If we are allocating a page in this group for the first time then
- * ensure that we have enabled the relevant partitions for it.
- */
- if (group_id != MGM_RESERVED_GROUP_ID) {
- switch (data->groups[group_id].state) {
- case MGM_GROUP_STATE_NEW:
- enable_partition(data, group_id);
- break;
- case MGM_GROUP_STATE_ENABLED:
- case MGM_GROUP_STATE_DISABLED_NOT_FREED:
- case MGM_GROUP_STATE_DISABLED:
- /* Everything should already be set up*/
- break;
- default:
- dev_err(data->dev, "Group %u in invalid state %d\n",
- group_id, data->groups[group_id].state);
- }
- }
-
p = alloc_pages(gfp_mask, order);
if (p) {
@@ -742,7 +482,7 @@ static u64 mgm_update_gpu_pte(
int const mmu_level, u64 pte)
{
struct mgm_groups *const data = mgm_dev->data;
- unsigned int pbha;
+ u64 const old_pte = pte;
dev_dbg(data->dev,
"%s(mgm_dev=%p, group_id=%u, mmu_level=%d, pte=0x%llx)\n",
@@ -751,40 +491,22 @@ static u64 mgm_update_gpu_pte(
if (INVALID_GROUP_ID(group_id))
return pte;
- /* Clear any bits set in the PBHA range */
- if (pte & ((u64)PBHA_BIT_MASK << PBHA_BIT_POS)) {
- dev_warn(data->dev,
- "%s: updating pte with bits already set in PBHA range",
- __func__);
- pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
- }
-
switch (group_id) {
case MGM_RESERVED_GROUP_ID:
case MGM_IMPORTED_MEMORY_GROUP_ID:
/* The reserved group doesn't set PBHA bits */
- /* TODO: Determine what to do with imported memory */
+ pte = slc_wipe_pbha(pte);
break;
+ case MGM_SLC_GROUP_ID:
+ /* Map requests for SLC memory groups to SLC */
+ pte = slc_set_pbha(&data->slc_data, pte);
default:
- /* All other groups will have PBHA bits */
- if (data->groups[group_id].state > MGM_GROUP_STATE_NEW) {
- u64 old_pte = pte;
- pbha = data->groups[group_id].pbha;
-
- pte |= ((u64)pbha & PBHA_BIT_MASK) << PBHA_BIT_POS;
-
- dev_dbg(data->dev,
- "%s: group_id=%u pbha=%d "
- "pte=0x%llx -> 0x%llx\n",
- __func__, group_id, pbha, old_pte, pte);
-
- } else {
- dev_err(data->dev,
- "Tried to get PBHA of uninitialized group=%d",
- group_id);
- }
+ break;
}
+ dev_dbg(data->dev, "%s: group_id=%u pte=0x%llx -> 0x%llx\n",
+ __func__, group_id, old_pte, pte);
+
#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER_DEBUG_FS
atomic_inc(&data->groups[group_id].update_gpu_pte);
#endif
@@ -795,27 +517,10 @@ static u64 mgm_update_gpu_pte(
static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev, unsigned int group_id,
int mmu_level, u64 pte)
{
- struct mgm_groups *const data = mgm_dev->data;
- u64 old_pte;
-
if (INVALID_GROUP_ID(group_id))
return pte;
- switch (group_id) {
- case MGM_RESERVED_GROUP_ID:
- case MGM_IMPORTED_MEMORY_GROUP_ID:
- /* The reserved group doesn't set PBHA bits */
- /* TODO: Determine what to do with imported memory */
- break;
- default:
- /* All other groups will have PBHA bits, so clear them */
- old_pte = pte;
- pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
- dev_dbg(data->dev, "%s: group_id=%u pte=0x%llx -> 0x%llx\n", __func__, group_id,
- old_pte, pte);
- }
-
- return pte;
+ return slc_wipe_pbha(pte);
}
static vm_fault_t mgm_vmf_insert_pfn_prot(
@@ -847,49 +552,12 @@ static vm_fault_t mgm_vmf_insert_pfn_prot(
return fault;
}
-static void mgm_resize_callback(void *data, int id, size_t size_allocated)
-{
- struct mgm_groups *const mgm_data = (struct mgm_groups *)data;
- dev_dbg(mgm_data->dev, "Resize callback called, size_allocated: %zu\n", size_allocated);
- /* Update the partition size for the group */
- atomic64_set(&mgm_data->pt_stats[id].size, size_allocated);
-}
-
static int mgm_initialize_data(struct mgm_groups *mgm_data)
{
int i, ret;
- /* +1 to include the required default group */
- const int ngroups = of_property_count_strings(mgm_data->dev->of_node, "groups") + 1;
- if (WARN_ON(ngroups < 0) ||
- WARN_ON(ngroups > MEMORY_GROUP_MANAGER_NR_GROUPS)) {
- mgm_data->ngroups = 0;
- } else {
- mgm_data->ngroups = ngroups;
- }
- mgm_data->npartitions = of_property_count_strings(mgm_data->dev->of_node, "pt_id");
-
- mgm_data->pt_stats = kzalloc(mgm_data->npartitions * sizeof(struct partition_stats), GFP_KERNEL);
- if (mgm_data->pt_stats == NULL) {
- dev_err(mgm_data->dev, "failed to allocate space for pt_stats");
- ret = -ENOMEM;
+ if ((ret = slc_init_data(&mgm_data->slc_data, mgm_data->dev)))
goto out_err;
- }
-
- for (i = 0; i < mgm_data->npartitions; i++) {
- struct partition_stats* stats;
- u32 capacity_kb;
- ret = of_property_read_u32_index(mgm_data->dev->of_node, "pt_size", i, &capacity_kb);
- if (ret) {
- dev_err(mgm_data->dev, "failed to read pt_size[%d]", i);
- continue;
- }
-
- stats = &mgm_data->pt_stats[i];
- // Convert from KB to bytes
- stats->capacity = (u64)capacity_kb << 10;
- atomic64_set(&stats->size, stats->capacity);
- }
for (i = 0; i < MEMORY_GROUP_MANAGER_NR_GROUPS; i++) {
atomic_set(&mgm_data->groups[i].size, 0);
@@ -898,50 +566,8 @@ static int mgm_initialize_data(struct mgm_groups *mgm_data)
atomic_set(&mgm_data->groups[i].insert_pfn, 0);
atomic_set(&mgm_data->groups[i].update_gpu_pte, 0);
#endif
-
- mgm_data->groups[i].pbha = MGM_PBHA_DEFAULT;
- mgm_data->groups[i].base_pt = 0;
- mgm_data->groups[i].pt_num = 0;
- mgm_data->groups[i].active_pt_idx = 0;
- mgm_data->groups[i].state = MGM_GROUP_STATE_NEW;
- }
-
- /* Discover the partitions belonging to each memory group, skipping the reserved group */
- for (i = 1; i < mgm_data->ngroups; i++) {
- /* Device tree has no description for the reserved group */
- int const dt_idx = i - 1;
-
- int err = of_property_read_u32_index(
- mgm_data->dev->of_node, "group_base_pt", dt_idx, &mgm_data->groups[i].base_pt);
- if (err) {
- dev_warn(mgm_data->dev, "failed to read base pt index for group %d", i);
- continue;
- }
-
- err = of_property_read_u32_index(
- mgm_data->dev->of_node, "group_pt_num", dt_idx, &mgm_data->groups[i].pt_num);
- if (err)
- dev_warn(mgm_data->dev, "failed to read pt number for group %d", i);
}
- /*
- * Initialize SLC partitions. We don't enable partitions until
- * we actually allocate memory to the corresponding memory
- * group
- */
- mgm_data->pt_handle =
- pt_client_register(mgm_data->dev->of_node, (void*)mgm_data, &mgm_resize_callback);
-
- if (IS_ERR(mgm_data->pt_handle)) {
- ret = PTR_ERR(mgm_data->pt_handle);
- dev_err(mgm_data->dev, "pt_client_register returned %d\n", ret);
- goto out_err;
- }
-
- /* We don't use PBHA bits for the reserved memory group, and so
- * it is effectively already initialized.
- */
- mgm_data->groups[MGM_RESERVED_GROUP_ID].state = MGM_GROUP_STATE_ENABLED;
if ((ret = mgm_debugfs_init(mgm_data)))
goto out_err;
@@ -949,20 +575,9 @@ static int mgm_initialize_data(struct mgm_groups *mgm_data)
if ((ret = mgm_sysfs_init(mgm_data)))
goto out_err;
-#ifdef CONFIG_MALI_PIXEL_GPU_SLC
- /* We enable the SLC partition by default to support dynamic SLC caching.
- * Enabling will initialize the partition, by querying the pbha and assigning a ptid.
- * We then immediately disable the partition, effectively resizing the group to zero,
- * whilst still retaining other properties such as pbha.
- */
- enable_partition(mgm_data, MGM_SLC_GROUP_ID);
- disable_partition(mgm_data, MGM_SLC_GROUP_ID);
-#endif
-
return ret;
out_err:
- kfree(mgm_data->pt_stats);
return ret;
}
@@ -983,29 +598,9 @@ static void mgm_term_data(struct mgm_groups *data)
dev_warn(data->dev,
"%zu 9 order pages in group(%d) leaked\n",
(size_t)atomic_read(&group->lp_size), i);
-
- /* Disable partition indices and free the partition */
- switch (group->state) {
-
- case MGM_GROUP_STATE_NEW:
- case MGM_GROUP_STATE_DISABLED:
- /* Nothing to do */
- break;
-
- case MGM_GROUP_STATE_ENABLED:
- pt_client_disable(data->pt_handle, group_active_pt_id(data, i));
- break;
- case MGM_GROUP_STATE_DISABLED_NOT_FREED:
- pt_client_free(data->pt_handle, group_active_pt_id(data, i));
- break;
-
- default:
- dev_err(data->dev, "Group %d in invalid state %d\n",
- i, group->state);
- }
}
- pt_client_unregister(data->pt_handle);
+ slc_term_data(&data->slc_data);
mgm_debugfs_term(data);
mgm_sysfs_term(data);
diff --git a/mali_pixel/pixel_slc.c b/mali_pixel/pixel_slc.c
new file mode 100644
index 0000000..62c6908
--- /dev/null
+++ b/mali_pixel/pixel_slc.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ *
+ * Author: Jack Diver <diverj@google.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dev_printk.h>
+/* Pixel integration includes */
+#include "pixel_slc.h"
+
+
+/**
+ * DOC: PBHA
+ *
+ * Borr does not have "real" PBHA support. However, since we only use a 36-bit PA on the bus,
+ * AxADDR[39:36] is wired up to the GPU AxUSER[PBHA] field seen by the rest of the system.
+ * Those AxADDR bits come from [39:36] in the page descriptor.
+ *
+ * Odin and Turse have "real" PBHA support using a dedicated output signal and page descriptor field.
+ * The AxUSER[PBHA] field is driven by the GPU's PBHA signal, and AxADDR[39:36] is dropped.
+ * The page descriptor PBHA field is [62:59].
+ *
+ * We could write to both of these locations, as each SoC only reads from its respective PBHA
+ * location with the other being ignored or dropped.
+ *
+ * b/148988078 contains confirmation of the above description.
+ */
+#if IS_ENABLED(CONFIG_SOC_GS101)
+#define PBHA_BIT_POS (36)
+#else
+#define PBHA_BIT_POS (59)
+#endif
+#define PBHA_BIT_MASK (0xf)
+
+
+/**
+ * slc_wipe_pbha - Clear any set PBHA bits from the pte.
+ *
+ * @pte: The pte to strip of PBHA.
+ *
+ * Return: The PTE with all PBHA stripped.
+ */
+u64 slc_wipe_pbha(u64 pte)
+{
+ return pte & ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
+}
+
+/**
+ * slc_set_pbha - Apply the PBHA to @pte.
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @pte: The pte to modify.
+ *
+ * Return: The PTE with the partition's PBHA bits applied.
+ */
+u64 slc_set_pbha(struct slc_data const *data, u64 pte)
+{
+ /* Clear any bits set in the PBHA range */
+ pte = slc_wipe_pbha(pte);
+
+ /* Apply the PBHA for the given virtual partition */
+ return pte | (((u64)data->partition.pbha) & PBHA_BIT_MASK) << PBHA_BIT_POS;
+}
+
+/**
+ * enable_partition - Enable @pt
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @pt: The &struct slc_partition representing the partition to enable.
+ */
+static void enable_partition(struct slc_data *data, struct slc_partition *pt)
+{
+ /* Skip if already enabled */
+ if (pt->enabled)
+ return;
+
+ (void)pt_client_enable(data->pt_handle, pt->index);
+ pt->enabled = true;
+
+ dev_dbg(data->dev, "enabled partition %d", pt->index);
+}
+
+/**
+ * disable_partition - Disable @pt
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @pt: The &struct slc_partition representing the partition to disable.
+ */
+static void disable_partition(struct slc_data *data, struct slc_partition *pt)
+{
+ /* Skip if not enabled */
+ if (!pt->enabled)
+ return;
+
+ pt_client_disable_no_free(data->pt_handle, pt->index);
+ pt->enabled = false;
+
+ dev_dbg(data->dev, "disabled partition %d", pt->index);
+}
+
+/**
+ * init_partition - Register and initialize a partition with the SLC driver.
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @pt: The &struct slc_partition to store the configured partition information.
+ * @index: The index of the partition, relative to the DT node.
+ *
+ * Return: -EINVAL on error, otherwise 0.
+ */
+static int init_partition(struct slc_data *data, struct slc_partition *pt, u32 index)
+{
+ ptid_t ptid;
+ ptpbha_t pbha;
+ int err = -EINVAL;
+
+ ptid = pt_client_enable(data->pt_handle, index);
+ if (ptid == PT_PTID_INVALID) {
+ dev_err(data->dev, "failed to enable pt: %d\n", index);
+ goto err_exit;
+ }
+
+ pbha = pt_pbha(data->dev->of_node, index);
+ if (pbha == PT_PBHA_INVALID) {
+ dev_err(data->dev, "failed to get PBHA for pt: %d\n", index);
+ goto err_exit;
+ }
+
+ /* This retains the allocated ptid */
+ pt_client_disable_no_free(data->pt_handle, index);
+
+ /* Success */
+ err = 0;
+
+ *pt = (struct slc_partition) {
+ .index = index,
+ .ptid = ptid,
+ .pbha = pbha,
+ .enabled = false,
+ };
+
+err_exit:
+ return err;
+}
+
+
+/**
+ * term_partition - Disable and free a partition, unregistering it.
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @pt: The &struct slc_partition to terminate.
+ *
+ * This function returns no value.
+ */
+static void term_partition(struct slc_data *data, struct slc_partition *pt)
+{
+ disable_partition(data, pt);
+ pt_client_free(data->pt_handle, pt->index);
+}
+
+/**
+ * slc_init_data - Read all SLC partition information, init the partitions, and track within @data.
+ *
+ * @data: The &struct slc_data tracking partition information.
+ * @dev: The platform device associated with the parent node.
+ *
+ * Return: On success, returns 0. On failure an error code is returned.
+ */
+int slc_init_data(struct slc_data *data, struct device* dev)
+{
+ int ret = -EINVAL;
+
+ if (data == NULL || dev == NULL)
+ goto err_exit;
+
+ /* Inherit the platform device */
+ data->dev = dev;
+
+ /* Register our node with the SLC driver.
+ * This detects our partitions defined within the DT.
+ */
+ data->pt_handle = pt_client_register(data->dev->of_node, NULL, NULL);
+ if (IS_ERR(data->pt_handle)) {
+ ret = PTR_ERR(data->pt_handle);
+ dev_err(data->dev, "pt_client_register failed with: %d\n", ret);
+ goto err_exit;
+ }
+
+ if ((ret = init_partition(data, &data->partition, 0)))
+ goto pt_init_err_exit;
+
+ return 0;
+
+pt_init_err_exit:
+ pt_client_unregister(data->pt_handle);
+
+err_exit:
+ return ret;
+}
+
+/**
+ * slc_term_data - Tear down SLC partitions and free tracking data.
+ *
+ * @data: The &struct slc_data tracking partition information.
+ */
+void slc_term_data(struct slc_data *data)
+{
+ term_partition(data, &data->partition);
+
+ pt_client_unregister(data->pt_handle);
+}
diff --git a/mali_pixel/pixel_slc.h b/mali_pixel/pixel_slc.h
new file mode 100644
index 0000000..40b5ad7
--- /dev/null
+++ b/mali_pixel/pixel_slc.h
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2024 Google LLC.
+ *
+ * Author: Jack Diver <diverj@google.com>
+ */
+#ifndef _PIXEL_SLC_H_
+#define _PIXEL_SLC_H_
+
+#include <soc/google/pt.h>
+
+/**
+ * DOC: SLC partition management
+ *
+ * Key definitions:
+ * + Partition index - The unique index of a partition, relative to the dt node that owns it.
+ * This index is used when communicating with the underlying SLC driver.
+ * + ptid - This is the HW level ID associated with an enabled partition. These IDs are allocated
+ *          at partition enable time. The GPU driver will never directly use the ptid, but will
+ *          track it.
+ *          External analysis of the caching behavior (e.g. hit and eviction counters) is
+ *          associated with a ptid, not a physical partition index.
+ * This driver attempts to hold on to any allocated ptids until driver termination to make
+ * profiling of caching performance easier.
+ * + PBHA - Acronym: Page Based Hardware Attributes. Every physical partition has a PBHA value
+ * associated with it. We insert these attributes into PTEs so that transactions with a
+ * page carry the PBHA within their high bits.
+ * Transactions with PBHA bits set are intercepted by the SLC, where the corresponding
+ *          partition and its caching behavior (read/write alloc, etc.) are looked up and applied to
+ * the transaction.
+ */
+
+/**
+ * struct slc_partition - Structure for tracking partition state.
+ */
+struct slc_partition {
+	/** @index: The partition index, relative to the owning DT node */
+ u32 index;
+
+ /** @ptid: The active partition ID for this virtual partition */
+ ptid_t ptid;
+
+ /** @pbha: The page based HW attributes for this partition */
+ ptpbha_t pbha;
+
+ /** @enabled: Is the partition currently enabled */
+ bool enabled;
+};
+
+/**
+ * struct slc_data - Structure for tracking SLC context.
+ */
+struct slc_data {
+ /** @pt_handle: Link to ACPM SLC partition data */
+ struct pt_handle *pt_handle;
+
+ /** @partition: Information specific to an individual SLC partition */
+ struct slc_partition partition;
+
+ /** @dev: Inherited pointer to device attached */
+ struct device *dev;
+};
+
+int slc_init_data(struct slc_data *data, struct device* dev);
+
+void slc_term_data(struct slc_data *data);
+
+u64 slc_set_pbha(struct slc_data const *data, u64 pte);
+
+u64 slc_wipe_pbha(u64 pte);
+
+#endif /* _PIXEL_SLC_H_ */