From e0072f7ba56eac21aeb7cdbb7b809e57be3b9e02 Mon Sep 17 00:00:00 2001 From: Jesse Hall Date: Mon, 22 Mar 2021 22:13:08 -0700 Subject: mali_pixel: consolidate mali_mgm and mali_pcm Test: boot to Android home, check dmesg for mali-mgm and mali-pcm probe Signed-off-by: Jesse Hall Change-Id: I47a7da5fd3b4a24832e2978dcce0b99fe7b54c7c --- mali_pixel/Kbuild | 47 +++ mali_pixel/Kconfig | 33 ++ mali_pixel/Makefile | 21 ++ mali_pixel/mali_pixel_mod.c | 52 +++ mali_pixel/mali_pixel_mod.h | 11 + mali_pixel/memory_group_manager.c | 660 ++++++++++++++++++++++++++++++++++ mali_pixel/priority_control_manager.c | 157 ++++++++ 7 files changed, 981 insertions(+) create mode 100644 mali_pixel/Kbuild create mode 100644 mali_pixel/Kconfig create mode 100644 mali_pixel/Makefile create mode 100644 mali_pixel/mali_pixel_mod.c create mode 100644 mali_pixel/mali_pixel_mod.h create mode 100644 mali_pixel/memory_group_manager.c create mode 100644 mali_pixel/priority_control_manager.c (limited to 'mali_pixel') diff --git a/mali_pixel/Kbuild b/mali_pixel/Kbuild new file mode 100644 index 0000000..2fd00e6 --- /dev/null +++ b/mali_pixel/Kbuild @@ -0,0 +1,47 @@ +# +# (C) COPYRIGHT 2019 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. 
+# +# SPDX-License-Identifier: GPL-2.0 +# +# + +# make $(src) as absolute path if it isn't already, by prefixing $(srctree) +src:=$(if $(patsubst /%,,$(src)),$(srctree)/$(src),$(src)) + +CONFIG_MALI_MEMORY_GROUP_MANAGER ?= m +CONFIG_MALI_PRIORITY_CONTROL_MANAGER ?= m + +DEFINES += \ + -DCONFIG_MALI_MEMORY_GROUP_MANAGER=$(CONFIG_MALI_MEMORY_GROUP_MANAGER) \ + -DCONFIG_MALI_PRIORITY_CONTROL_MANAGER=$(CONFIG_MALI_PRIORITY_CONTROL_MANAGER) + +# Use our defines when compiling, and include mali platform module headers +ccflags-y += $(DEFINES) -I$(src)/../common/include + +mali_pixel-objs := +ifeq ($(CONFIG_MALI_MEMORY_GROUP_MANAGER),m) + mali_pixel-objs += memory_group_manager.o +endif +ifeq ($(CONFIG_MALI_PRIORITY_CONTROL_MANAGER),m) + mali_pixel-objs += priority_control_manager.o +endif + +# Add kernel module target if any of our config options is enabled +ifneq ($(mali_pixel-objs),) + obj-m += mali_pixel.o + mali_pixel-objs += mali_pixel_mod.o +endif diff --git a/mali_pixel/Kconfig b/mali_pixel/Kconfig new file mode 100644 index 0000000..bec8add --- /dev/null +++ b/mali_pixel/Kconfig @@ -0,0 +1,33 @@ +# +# (C) COPYRIGHT 2019 ARM Limited. All rights reserved. +# +# This program is free software and is provided to you under the terms of the +# GNU General Public License version 2 as published by the Free Software +# Foundation, and any use by you of this program is subject to the terms +# of such GNU licence. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, you can access it online at +# http://www.gnu.org/licenses/gpl-2.0.html. 
+# +# SPDX-License-Identifier: GPL-2.0 +# +# + +config MALI_MEMORY_GROUP_MANAGER + tristate "MALI_MEMORY_GROUP_MANAGER" + help + This option enables an example implementation of a memory group manager + for allocation and release of pages for memory pools managed by Mali GPU + device drivers. + +config MALI_PRIORITY_CONTROL_MANAGER + tristate "MALI_PRIORITY_CONTROL_MANAGER" + help + This option enables an implementation of a priority control manager + for determining the target GPU scheduling priority of a process. diff --git a/mali_pixel/Makefile b/mali_pixel/Makefile new file mode 100644 index 0000000..517167e --- /dev/null +++ b/mali_pixel/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright 2021 Google LLC +# + +KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build +M ?= $(shell pwd) + +KBUILD_OPTIONS += CONFIG_MALI_MEMORY_GROUP_MANAGER=m +KBUILD_OPTIONS += CONFIG_MALI_PRIORITY_CONTROL_MANAGER=m + +KBUILD_OPTIONS += $(KBUILD_EXTRA) # Extra config if any + +modules: + $(MAKE) -C $(KERNEL_SRC) M=$(M) W=1 EXTRA_CFLAGS="-I$(M) -I$(M)/../common/include" modules $(KBUILD_OPTIONS) $(@) + +modules_install: + $(MAKE) -C $(KERNEL_SRC) M=$(M) modules_install + +clean: + $(MAKE) -C $(KDIR) M=$(CURDIR) clean diff --git a/mali_pixel/mali_pixel_mod.c b/mali_pixel/mali_pixel_mod.c new file mode 100644 index 0000000..47b5090 --- /dev/null +++ b/mali_pixel/mali_pixel_mod.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "mali_pixel_mod.h" +#include + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Pixel platform integration for GPU"); +MODULE_AUTHOR(""); +MODULE_VERSION("1.0"); + +static int __init mali_pixel_init(void) +{ + int ret = 0; + +#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER + ret = platform_driver_register(&memory_group_manager_driver); +#endif + if (ret) + goto fail_mgm; + +#ifdef CONFIG_MALI_PRIORITY_CONTROL_MANAGER + ret = platform_driver_register(&priority_control_manager_driver); +#else +#endif + if (ret) + goto fail_pcm; + + goto 
exit; + +fail_pcm: +#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER + platform_driver_unregister(&memory_group_manager_driver); +#endif + +fail_mgm: + /* nothing to clean up here */ + +exit: + return ret; +} +module_init(mali_pixel_init); + +static void __exit mali_pixel_exit(void) +{ +#ifdef CONFIG_MALI_PRIORITY_CONTROL_MANAGER + platform_driver_unregister(&priority_control_manager_driver); +#endif +#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER + platform_driver_unregister(&memory_group_manager_driver); +#endif +} +module_exit(mali_pixel_exit); diff --git a/mali_pixel/mali_pixel_mod.h b/mali_pixel/mali_pixel_mod.h new file mode 100644 index 0000000..0f5f0d3 --- /dev/null +++ b/mali_pixel/mali_pixel_mod.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#ifdef CONFIG_MALI_MEMORY_GROUP_MANAGER +extern struct platform_driver memory_group_manager_driver; +#endif + +#ifdef CONFIG_MALI_PRIORITY_CONTROL_MANAGER +extern struct platform_driver priority_control_manager_driver; +#endif \ No newline at end of file diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c new file mode 100644 index 0000000..7d6422e --- /dev/null +++ b/mali_pixel/memory_group_manager.c @@ -0,0 +1,660 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * memory_group_manager.c + * + * C) COPYRIGHT 2019 ARM Limited. All rights reserved. + * C) COPYRIGHT 2019-2020 Google LLC + * + */ + +/* Turn this on for more debug */ +//#define DEBUG + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include +#include + +#include + +#define PBHA_BIT_POS (36) +#define PBHA_BIT_MASK (0xf) + +#define MGM_PBHA_DEFAULT 0 +#define GROUP_ID_TO_PT_IDX(x) ((x)-1) + +/* The Mali driver requires that allocations made on one of the groups + * are not treated specially. 
+ */ +#define MGM_RESERVED_GROUP_ID 0 + +/* Imported memory is handled by the allocator of the memory, and the Mali + * DDK will request a group_id for such memory via mgm_get_import_memory_id(). + * We specify which group we want to use for this here. + */ +#define MGM_IMPORTED_MEMORY_GROUP_ID (MEMORY_GROUP_MANAGER_NR_GROUPS - 1) + + +#define INVALID_GROUP_ID(group_id) \ + (WARN_ON((group_id) < 0) || \ + WARN_ON((group_id) >= MEMORY_GROUP_MANAGER_NR_GROUPS)) + +#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE) +static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, pgprot_t pgprot) +{ + int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot); + + if (unlikely(err == -ENOMEM)) + return VM_FAULT_OOM; + if (unlikely(err < 0 && err != -EBUSY)) + return VM_FAULT_SIGBUS; + + return VM_FAULT_NOPAGE; +} +#endif + +/** + * struct mgm_group - Structure to keep track of the number of allocated + * pages per group + * + * @size: The number of allocated small(4KB) pages + * @lp_size: The number of allocated large(2MB) pages + * @insert_pfn: The number of calls to map pages for CPU access. + * @update_gpu_pte: The number of calls to update GPU page table entries. + * @ptid: The partition ID for this group + * @pbha: The PBHA bits assigned to this group, + * @state: The lifecycle state of the partition associated with this group + * This structure allows page allocation information to be displayed via + * debugfs. Display is organized per group with small and large sized pages. 
+ */ +struct mgm_group { + atomic_t size; + atomic_t lp_size; + atomic_t insert_pfn; + atomic_t update_gpu_pte; + + ptid_t ptid; + ptpbha_t pbha; + enum { + MGM_GROUP_STATE_NEW = 0, + MGM_GROUP_STATE_ENABLED = 10, + MGM_GROUP_STATE_DISABLED_NOT_FREED = 20, + MGM_GROUP_STATE_DISABLED = 30, + } state; +}; + +/** + * struct mgm_groups - Structure for groups of memory group manager + * + * @groups: To keep track of the number of allocated pages of all groups + * @dev: device attached + * @pt_handle: Link to SLC partition data + * @mgm_debugfs_root: debugfs root directory of memory group manager + * + * This structure allows page allocation information to be displayed via + * debugfs. Display is organized per group with small and large sized pages. + */ +struct mgm_groups { + struct mgm_group groups[MEMORY_GROUP_MANAGER_NR_GROUPS]; + struct device *dev; + struct pt_handle *pt_handle; +#ifdef CONFIG_DEBUG_FS + struct dentry *mgm_debugfs_root; +#endif +}; + +#ifdef CONFIG_DEBUG_FS + +static int mgm_debugfs_state_get(void *data, u64 *val) +{ + struct mgm_group *group = data; + *val = (int)group->state; + return 0; +} + +static int mgm_debugfs_size_get(void *data, u64 *val) +{ + struct mgm_group *group = data; + *val = atomic_read(&group->size); + return 0; +} + +static int mgm_debugfs_lp_size_get(void *data, u64 *val) +{ + struct mgm_group *group = data; + *val = atomic_read(&group->lp_size); + return 0; +} + +static int mgm_debugfs_insert_pfn_get(void *data, u64 *val) +{ + struct mgm_group *group = data; + *val = atomic_read(&group->insert_pfn); + return 0; +} + +static int mgm_debugfs_update_gpu_pte_get(void *data, u64 *val) +{ + struct mgm_group *group = data; + *val = atomic_read(&group->update_gpu_pte); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_state, mgm_debugfs_state_get, + NULL, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_size, mgm_debugfs_size_get, + NULL, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_lp_size, mgm_debugfs_lp_size_get, + NULL, "%llu\n"); 
+DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_insert_pfn, mgm_debugfs_insert_pfn_get, + NULL, "%llu\n"); +DEFINE_SIMPLE_ATTRIBUTE(fops_mgm_update_gpu_pte, mgm_debugfs_update_gpu_pte_get, + NULL, "%llu\n"); + +static void mgm_debugfs_term(struct mgm_groups *data) +{ + debugfs_remove_recursive(data->mgm_debugfs_root); +} + +#define MGM_DEBUGFS_GROUP_NAME_MAX 10 +static int mgm_debugfs_init(struct mgm_groups *mgm_data) +{ + int i; + struct dentry *e, *g; + char debugfs_group_name[MGM_DEBUGFS_GROUP_NAME_MAX]; + + /* + * Create root directory of memory-group-manager + */ + mgm_data->mgm_debugfs_root = + debugfs_create_dir("physical-memory-group-manager", NULL); + if (IS_ERR(mgm_data->mgm_debugfs_root)) { + dev_err(mgm_data->dev, + "debugfs: Failed to create root directory\n"); + return -ENODEV; + } + + /* + * Create debugfs files per group + */ + for (i = 0; i < MEMORY_GROUP_MANAGER_NR_GROUPS; i++) { + scnprintf(debugfs_group_name, MGM_DEBUGFS_GROUP_NAME_MAX, + "group_%02d", i); + g = debugfs_create_dir(debugfs_group_name, + mgm_data->mgm_debugfs_root); + if (IS_ERR(g)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create group[%d]\n", i); + goto remove_debugfs; + } + + e = debugfs_create_file("state", 0444, g, &mgm_data->groups[i], + &fops_mgm_state); + if (IS_ERR(e)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create state[%d]\n", i); + goto remove_debugfs; + } + + + e = debugfs_create_file("size", 0444, g, &mgm_data->groups[i], + &fops_mgm_size); + if (IS_ERR(e)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create size[%d]\n", i); + goto remove_debugfs; + } + + e = debugfs_create_file("lp_size", 0444, g, + &mgm_data->groups[i], &fops_mgm_lp_size); + if (IS_ERR(e)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create lp_size[%d]\n", i); + goto remove_debugfs; + } + + e = debugfs_create_file("insert_pfn", 0444, g, + &mgm_data->groups[i], &fops_mgm_insert_pfn); + if (IS_ERR(e)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create insert_pfn[%d]\n", i); + goto 
remove_debugfs; + } + + e = debugfs_create_file("update_gpu_pte", 0444, g, + &mgm_data->groups[i], &fops_mgm_update_gpu_pte); + if (IS_ERR(e)) { + dev_err(mgm_data->dev, + "debugfs: Couldn't create update_gpu_pte[%d]\n", + i); + goto remove_debugfs; + } + } + + return 0; + +remove_debugfs: + mgm_debugfs_term(mgm_data); + return -ENODEV; +} + +#else + +static void mgm_debugfs_term(struct mgm_groups *data) +{ +} + +static int mgm_debugfs_init(struct mgm_groups *mgm_data) +{ + return 0; +} + +#endif /* CONFIG_DEBUG_FS */ + +#define ORDER_SMALL_PAGE 0 +#define ORDER_LARGE_PAGE 9 +static void update_size(struct memory_group_manager_device *mgm_dev, int + group_id, int order, bool alloc) +{ + struct mgm_groups *data = mgm_dev->data; + + switch (order) { + case ORDER_SMALL_PAGE: + if (alloc) + atomic_inc(&data->groups[group_id].size); + else { + WARN_ON(atomic_read(&data->groups[group_id].size) == 0); + atomic_dec(&data->groups[group_id].size); + } + break; + + case ORDER_LARGE_PAGE: + if (alloc) + atomic_inc(&data->groups[group_id].lp_size); + else { + WARN_ON(atomic_read( + &data->groups[group_id].lp_size) == 0); + atomic_dec(&data->groups[group_id].lp_size); + } + break; + + default: + dev_err(data->dev, "Unknown order(%d)\n", order); + break; + } +} + +static struct page *mgm_alloc_page( + struct memory_group_manager_device *mgm_dev, int group_id, + gfp_t gfp_mask, unsigned int order) +{ + struct mgm_groups *const data = mgm_dev->data; + struct page *p; + + dev_dbg(data->dev, + "%s(mgm_dev=%p, group_id=%d gfp_mask=0x%x order=%u\n", + __func__, (void *)mgm_dev, group_id, gfp_mask, order); + + if (INVALID_GROUP_ID(group_id)) + return NULL; + + /* We don't expect to be allocting pages into the group used for + * external or imported memory + */ + if (WARN_ON(group_id == MGM_IMPORTED_MEMORY_GROUP_ID)) + return NULL; + + /* If we are allocating a page in this group for the first time then + * ensure that we have enabled the relevant partitions for it. 
+ */ + if (group_id != MGM_RESERVED_GROUP_ID) { + int ptid, pbha; + switch (data->groups[group_id].state) { + case MGM_GROUP_STATE_NEW: + ptid = pt_client_enable(data->pt_handle, + GROUP_ID_TO_PT_IDX(group_id)); + if (ptid == -EINVAL) { + dev_err(data->dev, + "Failed to get partition for group: " + "%d\n", group_id); + } else { + dev_info(data->dev, + "pt_client_enable returned ptid=%d for" + " group=%d", + ptid, group_id); + } + + pbha = pt_pbha(data->dev->of_node, + GROUP_ID_TO_PT_IDX(group_id)); + if (pbha == PT_PBHA_INVALID) { + dev_err(data->dev, + "Failed to get PBHA for group: %d\n", + group_id); + } else { + dev_info(data->dev, + "pt_pbha returned PBHA=%d for group=%d", + pbha, group_id); + } + + data->groups[group_id].ptid = ptid; + data->groups[group_id].pbha = pbha; + data->groups[group_id].state = MGM_GROUP_STATE_ENABLED; + + break; + case MGM_GROUP_STATE_ENABLED: + case MGM_GROUP_STATE_DISABLED_NOT_FREED: + case MGM_GROUP_STATE_DISABLED: + /* Everything should already be set up*/ + break; + default: + dev_err(data->dev, "Group %d in invalid state %d\n", + group_id, data->groups[group_id].state); + } + } + + p = alloc_pages(gfp_mask, order); + + if (p) { + update_size(mgm_dev, group_id, order, true); + } else { + struct mgm_groups *data = mgm_dev->data; + dev_err(data->dev, "alloc_pages failed\n"); + } + + return p; +} + +static void mgm_free_page( + struct memory_group_manager_device *mgm_dev, int group_id, + struct page *page, unsigned int order) +{ + struct mgm_groups *const data = mgm_dev->data; + + dev_dbg(data->dev, "%s(mgm_dev=%p, group_id=%d page=%p order=%u\n", + __func__, (void *)mgm_dev, group_id, (void *)page, order); + + if (INVALID_GROUP_ID(group_id)) + return; + + __free_pages(page, order); + + /* TODO: Determine the logic of when we disable a partition depending + * on when pages in that group drop to zero? Or after a timeout? 
+ */ + + update_size(mgm_dev, group_id, order, false); +} + +static int mgm_get_import_memory_id( + struct memory_group_manager_device *mgm_dev, + struct memory_group_manager_import_data *import_data) +{ + struct mgm_groups *const data = mgm_dev->data; + + dev_dbg(data->dev, "%s(mgm_dev=%p, import_data=%p (type=%d)\n", + __func__, (void *)mgm_dev, (void *)import_data, + (int)import_data->type); + + if (!WARN_ON(!import_data)) { + WARN_ON(!import_data->u.dma_buf); + + WARN_ON(import_data->type != + MEMORY_GROUP_MANAGER_IMPORT_TYPE_DMA_BUF); + } + + return MGM_IMPORTED_MEMORY_GROUP_ID; +} + +static u64 mgm_update_gpu_pte( + struct memory_group_manager_device *const mgm_dev, int const group_id, + int const mmu_level, u64 pte) +{ + struct mgm_groups *const data = mgm_dev->data; + unsigned int pbha; + + dev_dbg(data->dev, + "%s(mgm_dev=%p, group_id=%d, mmu_level=%d, pte=0x%llx)\n", + __func__, (void *)mgm_dev, group_id, mmu_level, pte); + + if (INVALID_GROUP_ID(group_id)) + return pte; + + /* Clear any bits set in the PBHA range */ + if (pte & ((u64)PBHA_BIT_MASK << PBHA_BIT_POS)) { + dev_warn(data->dev, + "%s: updating pte with bits already set in PBHA range", + __func__); + pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS); + } + + switch (group_id) { + case MGM_RESERVED_GROUP_ID: + case MGM_IMPORTED_MEMORY_GROUP_ID: + /* The reserved group doesn't set PBHA bits */ + /* TODO: Determine what to do with imported memory */ + break; + default: + /* All other groups will have PBHA bits */ + if (data->groups[group_id].state > MGM_GROUP_STATE_NEW) { + u64 old_pte = pte; + pbha = data->groups[group_id].pbha; + + pte |= ((u64)pbha & PBHA_BIT_MASK) << PBHA_BIT_POS; + + dev_dbg(data->dev, + "%s: group_id=%d pbha=%d " + "pte=0x%llx -> 0x%llx\n", + __func__, group_id, pbha, old_pte, pte); + + } else { + dev_err(data->dev, + "Tried to get PBHA of uninitialized group=%d", + group_id); + } + } + + atomic_inc(&data->groups[group_id].update_gpu_pte); + + return pte; +} + +static vm_fault_t 
mgm_vmf_insert_pfn_prot( + struct memory_group_manager_device *const mgm_dev, int const group_id, + struct vm_area_struct *const vma, unsigned long const addr, + unsigned long const pfn, pgprot_t const prot) +{ + struct mgm_groups *const data = mgm_dev->data; + vm_fault_t fault; + + dev_dbg(data->dev, + "%s(mgm_dev=%p, group_id=%d, vma=%p, addr=0x%lx, pfn=0x%lx," + " prot=0x%llx)\n", + __func__, (void *)mgm_dev, group_id, (void *)vma, addr, pfn, + pgprot_val(prot)); + + if (INVALID_GROUP_ID(group_id)) + return VM_FAULT_SIGBUS; + + fault = vmf_insert_pfn_prot(vma, addr, pfn, prot); + + if (fault == VM_FAULT_NOPAGE) + atomic_inc(&data->groups[group_id].insert_pfn); + else + dev_err(data->dev, "vmf_insert_pfn_prot failed\n"); + + return fault; +} + +static void mgm_resize_callback(void *data, int id, size_t size_allocated) +{ + /* Currently we don't do anything on partition resize */ + struct mgm_groups *const mgm_data = (struct mgm_groups *)data; + dev_dbg(mgm_data->dev, "Resize callback called, size_allocated: %zu\n", + size_allocated); +} + +static int mgm_initialize_data(struct mgm_groups *mgm_data) +{ + int i, ret; + + for (i = 0; i < MEMORY_GROUP_MANAGER_NR_GROUPS; i++) { + atomic_set(&mgm_data->groups[i].size, 0); + atomic_set(&mgm_data->groups[i].lp_size, 0); + atomic_set(&mgm_data->groups[i].insert_pfn, 0); + atomic_set(&mgm_data->groups[i].update_gpu_pte, 0); + + mgm_data->groups[i].pbha = MGM_PBHA_DEFAULT; + mgm_data->groups[i].state = MGM_GROUP_STATE_NEW; + } + + /* + * Initialize SLC partitions. 
We don't enable partitions until + * we actually allocate memory to the corresponding memory + * group + */ + mgm_data->pt_handle = pt_client_register( + mgm_data->dev->of_node, + (void *)mgm_data, &mgm_resize_callback); + + if (IS_ERR(mgm_data->pt_handle)) { + ret = PTR_ERR(mgm_data->pt_handle); + dev_err(mgm_data->dev, "pt_client_register returned %d\n", ret); + return ret; + } + + /* We don't use PBHA bits for the reserved memory group, and so + * it is effectively already initialized. + */ + mgm_data->groups[MGM_RESERVED_GROUP_ID].state = MGM_GROUP_STATE_ENABLED; + + ret = mgm_debugfs_init(mgm_data); + + return ret; +} + +static void mgm_term_data(struct mgm_groups *data) +{ + int i; + struct mgm_group *group; + + for (i = 0; i < MEMORY_GROUP_MANAGER_NR_GROUPS; i++) { + group = &data->groups[i]; + + /* Shouldn't have outstanding page allocations at this stage*/ + if (atomic_read(&group->size) != 0) + dev_warn(data->dev, + "%zu 0-order pages in group(%d) leaked\n", + (size_t)atomic_read(&group->size), i); + if (atomic_read(&group->lp_size) != 0) + dev_warn(data->dev, + "%zu 9 order pages in group(%d) leaked\n", + (size_t)atomic_read(&group->lp_size), i); + + /* Disable partition indices and free the partition */ + switch (group->state) { + + case MGM_GROUP_STATE_NEW: + case MGM_GROUP_STATE_DISABLED: + /* Nothing to do */ + break; + + case MGM_GROUP_STATE_ENABLED: + case MGM_GROUP_STATE_DISABLED_NOT_FREED: + pt_client_free(data->pt_handle, group->ptid); + break; + + default: + dev_err(data->dev, "Group %d in invalid state %d\n", + i, group->state); + } + } + + pt_client_unregister(data->pt_handle); + + mgm_debugfs_term(data); +} + +static int memory_group_manager_probe(struct platform_device *pdev) +{ + struct memory_group_manager_device *mgm_dev; + struct mgm_groups *mgm_data; + + mgm_dev = kzalloc(sizeof(*mgm_dev), GFP_KERNEL); + if (!mgm_dev) + return -ENOMEM; + + mgm_dev->owner = THIS_MODULE; + mgm_dev->ops.mgm_alloc_page = mgm_alloc_page; + 
mgm_dev->ops.mgm_free_page = mgm_free_page; + mgm_dev->ops.mgm_get_import_memory_id = + mgm_get_import_memory_id; + mgm_dev->ops.mgm_vmf_insert_pfn_prot = mgm_vmf_insert_pfn_prot; + mgm_dev->ops.mgm_update_gpu_pte = mgm_update_gpu_pte; + + mgm_data = kzalloc(sizeof(*mgm_data), GFP_KERNEL); + if (!mgm_data) { + kfree(mgm_dev); + return -ENOMEM; + } + + mgm_dev->data = mgm_data; + mgm_data->dev = &pdev->dev; + + if (mgm_initialize_data(mgm_data)) { + kfree(mgm_data); + kfree(mgm_dev); + return -ENOENT; + } + + platform_set_drvdata(pdev, mgm_dev); + dev_info(&pdev->dev, "Memory group manager probed successfully\n"); + + return 0; +} + +static int memory_group_manager_remove(struct platform_device *pdev) +{ + struct memory_group_manager_device *mgm_dev = + platform_get_drvdata(pdev); + struct mgm_groups *mgm_data = mgm_dev->data; + + mgm_term_data(mgm_data); + kfree(mgm_data); + + kfree(mgm_dev); + + dev_info(&pdev->dev, "Memory group manager removed successfully\n"); + + return 0; +} + +static const struct of_device_id memory_group_manager_dt_ids[] = { + { .compatible = "arm,physical-memory-group-manager" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, memory_group_manager_dt_ids); + +struct platform_driver memory_group_manager_driver = { + .probe = memory_group_manager_probe, + .remove = memory_group_manager_remove, + .driver = { + .name = "mali-mgm", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(memory_group_manager_dt_ids), + /* + * Prevent the mgm_dev from being unbound and freed, as others + * may have pointers to it and would get confused, or crash, if + * it suddenly disappeared. + */ + .suppress_bind_attrs = true, + } +}; diff --git a/mali_pixel/priority_control_manager.c b/mali_pixel/priority_control_manager.c new file mode 100644 index 0000000..90ac59d --- /dev/null +++ b/mali_pixel/priority_control_manager.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2021 Google LLC. 
+ * + * Author: Sidath Senanayake + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * enum priorities - The different priorities available on the GPU + */ +enum priorities { + /** + * @PRIORITY_RT: Realtime priority + */ + PRIORITY_RT = 0, + /** + * @PRIORITY_HIGH: High priority + */ + PRIORITY_HIGH, + /** + * @PRIORITY_MED: Medium priority + */ + PRIORITY_MED, + /** + * @PRIORITY_LOW: Low priority + */ + PRIORITY_LOW, + /** + * @PRIORITY_COUNT: The number of priority classes + */ + PRIORITY_COUNT, +}; + +static const char* priority_name[PRIORITY_COUNT] = { + "realtime", + "high", + "medium", + "low", +}; + +/* + * TODO(b/182907924) Using permissive mode until we have a mechanism to validate priority requests + * in place. + */ +#define PERMISSIVE_MODE (1) + +/** + * PRIORITY_DEFAULT - The default priority that applications will be set to. + */ +#define PRIORITY_DEFAULT (PRIORITY_MED) + +/** + * pcm_scheduler_priority_check() - Checks in incoming priority request + * + * This function returns which priority a context should run at, taking into + * consideration what the context is requesting. + * + * @pcm_dev: Pointer to the priority control manager. + * @task: The task struct of the process requesting the priority + * check. + * @requested_priority: The priority that the context is requesting. + * + * Return: The priority that should be granted to the context. 
+ */ +static int pcm_scheduler_priority_check(struct priority_control_manager_device *pcm_dev, + struct task_struct *task, int requested_priority) +{ + int ret; + struct device *dev = pcm_dev->data; + kuid_t uid = task->cred->uid; + + switch (requested_priority) + { + + /* For low priority requests, we don't apply any restrictions */ + case PRIORITY_LOW: + case PRIORITY_MED: + ret = requested_priority; + dev_dbg(dev, "UID %d request for %s priority was granted\n", + __kuid_val(uid), priority_name[requested_priority]); + break; + + /* Request is for one of the restricted priorities */ + case PRIORITY_HIGH: + case PRIORITY_RT: + if (PERMISSIVE_MODE) { + ret = requested_priority; + dev_info(dev, "UID %d request for %s priority was granted\n", + __kuid_val(uid), priority_name[requested_priority]); + } else { + ret = PRIORITY_DEFAULT; + dev_warn(dev, + "UID %d request for %s priority was denied, granted %s instead\n", + __kuid_val(uid), priority_name[requested_priority], + priority_name[ret]); + } + break; + default: + ret = PRIORITY_DEFAULT; + dev_warn(dev, "UID %d requested an invalid priority (ID: %d), granted %s instead\n", + __kuid_val(uid), requested_priority, priority_name[ret]); + } + + return ret; +} + +static int priority_control_manager_probe(struct platform_device *pdev) +{ + struct priority_control_manager_device *pcm_dev; + + pcm_dev = kzalloc(sizeof(*pcm_dev), GFP_KERNEL); + if (!pcm_dev) + return -ENOMEM; + + pcm_dev->ops.pcm_scheduler_priority_check = pcm_scheduler_priority_check; + pcm_dev->data = &pdev->dev; + + platform_set_drvdata(pdev, pcm_dev); + dev_info(&pdev->dev, "Priority control manager probed successfully\n"); + + return 0; +} + +static int priority_control_manager_remove(struct platform_device *pdev) +{ + struct priority_control_manager_device *pcm_dev = platform_get_drvdata(pdev); + + dev_info(pcm_dev->data, "Priority control manager removed successfully\n"); + + return 0; +} + +static const struct of_device_id 
priority_control_manager_dt_ids[] = { + { .compatible = "arm,priority-control-manager" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, priority_control_manager_dt_ids); + +struct platform_driver priority_control_manager_driver = { + .probe = priority_control_manager_probe, + .remove = priority_control_manager_remove, + .driver = { + .name = "mali-pcm", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(priority_control_manager_dt_ids), + .suppress_bind_attrs = true, + } +}; -- cgit v1.2.3