summaryrefslogtreecommitdiff
path: root/mali_pixel/protected_memory_allocator.c
diff options
context:
space:
mode:
author   Erik Staats <estaats@google.com>  2021-12-10 17:42:22 -0800
committer   Erik Staats <estaats@google.com>  2021-12-15 20:46:08 +0000
commit   22bddbd92d0d31cd91b63c6d6f65438907cb7d8b (patch)
tree   5a7959f75c77ff0045f10814df9e718ec2c9a860 /mali_pixel/protected_memory_allocator.c
parent   b60b576952e694dda74b34ed15311d0e79c23997 (diff)
download   gpu-22bddbd92d0d31cd91b63c6d6f65438907cb7d8b.tar.gz
Mali protected memory allocator: Use slab allocation.
Since DMA buffers have minimum allocation sizes, using slab allocation wastes less DMA buffer memory. Bug: 194627754 Test: Verified that a GPU protected memory test can pass. It sometimes fails with a GPU fault. Test: See details in testing done comment in https://partner-android-review.googlesource.com/2115313 . Change-Id: I705b7f6d41f534fd4e16e9b70b72dd36e06b31d2
Diffstat (limited to 'mali_pixel/protected_memory_allocator.c')
-rw-r--r--   mali_pixel/protected_memory_allocator.c | 372
1 file changed, 323 insertions(+), 49 deletions(-)
diff --git a/mali_pixel/protected_memory_allocator.c b/mali_pixel/protected_memory_allocator.c
index cc50690..7e5570d 100644
--- a/mali_pixel/protected_memory_allocator.c
+++ b/mali_pixel/protected_memory_allocator.c
@@ -16,6 +16,11 @@
#include <soc/samsung/exynos-smc.h>
#define MALI_PMA_DMA_HEAP_NAME "vframe-secure"
+#define MALI_PMA_SLAB_SIZE (1 << 16)
+#define MALI_PMA_SLAB_BLOCK_SIZE (PAGE_SIZE)
+#define MALI_PMA_SLAB_BLOCK_COUNT \
+ (MALI_PMA_SLAB_SIZE / MALI_PMA_SLAB_BLOCK_SIZE)
+#define MALI_PMA_MAX_ALLOC_SIZE (MALI_PMA_SLAB_SIZE)
/**
* struct mali_pma_dev - Structure for managing a Mali protected memory
@@ -24,11 +29,15 @@
* @pma_dev: The base protected memory allocator device.
* @dev: The device for which to allocate protected memory.
* @dma_heap: The DMA buffer heap from which to allocate protected memory.
+ * @slab_list: List of allocated slabs of protected memory.
+ * @slab_mutex: Mutex used to serialize access to the slab list.
*/
struct mali_pma_dev {
struct protected_memory_allocator_device pma_dev;
struct device *dev;
struct dma_heap *dma_heap;
+ struct list_head slab_list;
+ struct mutex slab_mutex;
};
/**
@@ -36,17 +45,39 @@ struct mali_pma_dev {
* protected memory allocation.
*
* @pma: The base protected memory allocation record.
- * @dma_buf: The DMA buffer allocated for the protected memory. A reference to
- * the DMA buffer is held by this pointer.
- * @dma_attachment: The DMA buffer device attachment.
- * @dma_sg_table: The DMA buffer scatter/gather table.
+ * @slab: Protected memory slab used for allocation.
+ * @first_block_index: Index of first memory block allocated from the slab.
+ * @block_count: Count of the number of blocks allocated from the slab.
*/
struct mali_protected_memory_allocation {
struct protected_memory_allocation pma;
- struct dma_buf* dma_buf;
- struct dma_buf_attachment* dma_attachment;
- struct sg_table* dma_sg_table;
+ struct mali_pma_slab *slab;
+ int first_block_index;
+ int block_count;
+};
+
+/**
+ * struct mali_pma_slab - Structure for managing a slab of Mali protected
+ * memory.
+ *
+ * @list_entry: Entry in slab list.
+ * @base: Physical base address of slab memory.
+ * @dma_buf: The DMA buffer allocated for the slab. A reference to the DMA
+ * buffer is held by this pointer.
+ * @dma_attachment: The DMA buffer device attachment.
+ * @dma_sg_table: The DMA buffer scatter/gather table.
+ * @allocated_block_map: Bit map of allocated blocks in the slab.
+ */
+struct mali_pma_slab {
+ struct list_head list_entry;
+ phys_addr_t base;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ struct sg_table *dma_sg_table;
+ uint64_t allocated_block_map;
};
+static_assert(8 * sizeof(((struct mali_pma_slab *) 0)->allocated_block_map) >=
+ MALI_PMA_SLAB_BLOCK_COUNT);
static struct protected_memory_allocation *mali_pma_alloc_page(
struct protected_memory_allocator_device *pma_dev,
@@ -60,6 +91,24 @@ static void mali_pma_free_page(
struct protected_memory_allocator_device *pma_dev,
struct protected_memory_allocation *pma);
+static bool mali_pma_slab_alloc(
+ struct mali_pma_dev* mali_pma_dev,
+ struct mali_protected_memory_allocation *mali_pma, size_t size);
+
+static void mali_pma_slab_dealloc(
+ struct mali_pma_dev* mali_pma_dev,
+ struct mali_protected_memory_allocation *mali_pma);
+
+static bool mali_pma_slab_find_available(
+ struct mali_pma_dev* mali_pma_dev, size_t size,
+ struct mali_pma_slab** p_slab, int* p_block_index);
+
+static struct mali_pma_slab* mali_pma_slab_add(
+ struct mali_pma_dev* mali_pma_dev);
+
+static void mali_pma_slab_remove(
+ struct mali_pma_dev* mali_pma_dev, struct mali_pma_slab* slab);
+
static int protected_memory_allocator_probe(struct platform_device *pdev);
static int protected_memory_allocator_remove(struct platform_device *pdev);
@@ -79,15 +128,21 @@ static struct protected_memory_allocation *mali_pma_alloc_page(
struct mali_pma_dev *mali_pma_dev;
struct protected_memory_allocation* pma = NULL;
struct mali_protected_memory_allocation *mali_pma;
- struct dma_buf* dma_buf;
- struct dma_buf_attachment* dma_attachment;
- struct sg_table* dma_sg_table;
size_t alloc_size;
- bool success = false;
+ bool succeeded = false;
/* Get the Mali protected memory allocator device record. */
mali_pma_dev = container_of(pma_dev, struct mali_pma_dev, pma_dev);
+ /* Check requested size against the maximum size. */
+ alloc_size = 1 << (PAGE_SHIFT + order);
+ if (alloc_size > MALI_PMA_MAX_ALLOC_SIZE) {
+ dev_err(mali_pma_dev->dev,
+ "Protected memory allocation size %zu too big\n",
+ alloc_size);
+ goto out;
+ }
+
/* Allocate a Mali protected memory allocation record. */
mali_pma = devm_kzalloc(
mali_pma_dev->dev, sizeof(*mali_pma), GFP_KERNEL);
@@ -100,43 +155,19 @@ static struct protected_memory_allocation *mali_pma_alloc_page(
pma = &(mali_pma->pma);
pma->order = order;
- /* Allocate a DMA buffer. */
- alloc_size = 1 << (PAGE_SHIFT + order);
- dma_buf = dma_heap_buffer_alloc(
- mali_pma_dev->dma_heap, alloc_size, O_RDWR, 0);
- if (IS_ERR(dma_buf)) {
+ /* Allocate Mali protected memory from a slab. */
+ if (!mali_pma_slab_alloc(mali_pma_dev, mali_pma, alloc_size)) {
dev_err(mali_pma_dev->dev,
- "Failed to allocate a DMA buffer of size %zu\n",
- alloc_size);
- goto out;
- }
- mali_pma->dma_buf = dma_buf;
-
- /* Attach the device to the DMA buffer. */
- dma_attachment = dma_buf_attach(dma_buf, mali_pma_dev->dev);
- if (IS_ERR(dma_attachment)) {
- dev_err(mali_pma_dev->dev,
- "Failed to attach the device to the DMA buffer\n");
- goto out;
- }
- mali_pma->dma_attachment = dma_attachment;
-
- /* Map the DMA buffer into the attached device address space. */
- dma_sg_table =
- dma_buf_map_attachment(dma_attachment, DMA_BIDIRECTIONAL);
- if (IS_ERR(dma_sg_table)) {
- dev_err(mali_pma_dev->dev, "Failed to map the DMA buffer\n");
+ "Failed to allocate Mali protected memory.\n");
goto out;
}
- mali_pma->dma_sg_table = dma_sg_table;
- pma->pa = page_to_phys(sg_page(dma_sg_table->sgl));
/* Mark the allocation as successful. */
- success = true;
+ succeeded = true;
out:
/* Clean up on error. */
- if (!success) {
+ if (!succeeded) {
if (pma) {
mali_pma_free_page(pma_dev, pma);
pma = NULL;
@@ -184,19 +215,252 @@ static void mali_pma_free_page(
mali_pma =
container_of(pma, struct mali_protected_memory_allocation, pma);
- /* Free the Mali protected memory allocation. */
- if (mali_pma->dma_sg_table) {
+ /* Deallocate Mali protected memory from the slab. */
+ mali_pma_slab_dealloc(mali_pma_dev, mali_pma);
+
+ /* Deallocate the Mali protected memory allocation record. */
+ devm_kfree(mali_pma_dev->dev, mali_pma);
+}
+
+/**
+ * mali_pma_slab_alloc - Allocate protected memory from a slab
+ *
+ * @mali_pma_dev: Mali protected memory allocator device.
+ * @mali_pma: Mali protected memory allocation record to hold the slab memory.
+ * @size: Size in bytes of memory to allocate.
+ *
+ * Return: True if memory was successfully allocated.
+ */
+static bool mali_pma_slab_alloc(
+ struct mali_pma_dev *mali_pma_dev,
+ struct mali_protected_memory_allocation *mali_pma, size_t size) {
+ struct mali_pma_slab *slab;
+ int start_block;
+ int block_count;
+ bool succeeded = false;
+
+ /* Lock the slab list. */
+ mutex_lock(&(mali_pma_dev->slab_mutex));
+
+ /*
+ * Try finding an existing slab from which to allocate. If none are
+ * available, add a new slab and allocate from it.
+ */
+ if (!mali_pma_slab_find_available(
+ mali_pma_dev, size, &slab, &start_block)) {
+ slab = mali_pma_slab_add(mali_pma_dev);
+ if (!slab) {
+ goto out;
+ }
+ start_block = 0;
+ }
+
+ /* Allocate a contiguous set of blocks from the slab. */
+ block_count = DIV_ROUND_UP(size, MALI_PMA_SLAB_BLOCK_SIZE);
+ bitmap_set((unsigned long *) &(slab->allocated_block_map),
+ start_block, block_count);
+
+ /*
+ * Use the allocated slab memory for the Mali protected memory
+ * allocation.
+ */
+ mali_pma->pma.pa =
+ slab->base + (start_block * MALI_PMA_SLAB_BLOCK_SIZE);
+ mali_pma->slab = slab;
+ mali_pma->first_block_index = start_block;
+ mali_pma->block_count = block_count;
+
+ /* Mark the allocation as successful. */
+ succeeded = true;
+
+out:
+ /* Unlock the slab list. */
+ mutex_unlock(&(mali_pma_dev->slab_mutex));
+
+ return succeeded;
+}
+
+/**
+ * mali_pma_slab_dealloc - Deallocate protected memory from a slab
+ *
+ * @mali_pma_dev: Mali protected memory allocator device.
+ * @mali_pma: Mali protected memory allocation record holding slab memory to
+ * deallocate.
+ */
+static void mali_pma_slab_dealloc(
+ struct mali_pma_dev *mali_pma_dev,
+ struct mali_protected_memory_allocation *mali_pma) {
+ struct mali_pma_slab *slab;
+
+ /* Lock the slab list. */
+ mutex_lock(&(mali_pma_dev->slab_mutex));
+
+ /* Deallocate all the blocks in the slab. */
+ slab = mali_pma->slab;
+ bitmap_clear((unsigned long *) &(slab->allocated_block_map),
+ mali_pma->first_block_index, mali_pma->block_count);
+
+ /* If no slab blocks remain allocated, remove the slab. */
+ if (bitmap_empty(
+ (unsigned long *) &(slab->allocated_block_map),
+ MALI_PMA_SLAB_BLOCK_COUNT)) {
+ mali_pma_slab_remove(mali_pma_dev, slab);
+ }
+
+ /* Unlock the slab list. */
+ mutex_unlock(&(mali_pma_dev->slab_mutex));
+}
+
+/**
+ * mali_pma_slab_find_available - Find a slab with available memory
+ *
+ * Must be called with the slab list mutex locked.
+ *
+ * @mali_pma_dev: Mali protected memory allocator device.
+ * @size: Size in bytes of requested memory.
+ * @p_slab: Returned slab with requested memory available.
+ * @p_block_index: Returned starting block index of available memory.
+ *
+ * Return: True if a slab was found with the requested memory available.
+ */
+static bool mali_pma_slab_find_available(
+ struct mali_pma_dev *mali_pma_dev, size_t size,
+ struct mali_pma_slab **p_slab, int *p_block_index) {
+ struct mali_pma_slab *slab;
+ int block_count;
+ int start_block;
+ bool found = false;
+
+ /* Ensure the slab list mutex is locked. */
+ lockdep_assert_held(&(mali_pma_dev->slab_mutex));
+
+ /* Search slabs for a contiguous set of blocks of the requested size. */
+ block_count = DIV_ROUND_UP(size, MALI_PMA_SLAB_BLOCK_SIZE);
+ list_for_each_entry(slab, &(mali_pma_dev->slab_list), list_entry) {
+ start_block = bitmap_find_next_zero_area_off(
+ (unsigned long *) &(slab->allocated_block_map),
+ MALI_PMA_SLAB_BLOCK_COUNT, 0, block_count, 0, 0);
+ if (start_block < MALI_PMA_SLAB_BLOCK_COUNT) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Return results if found. */
+ if (found) {
+ *p_slab = slab;
+ *p_block_index = start_block;
+ }
+
+ return found;
+}
+
+/**
+ * mali_pma_slab_add - Allocate and add a new slab
+ *
+ * Must be called with the slab list mutex locked.
+ *
+ * @mali_pma_dev: Mali protected memory allocator device.
+ *
+ * Return: Newly added slab.
+ */
+static struct mali_pma_slab *mali_pma_slab_add(
+ struct mali_pma_dev *mali_pma_dev) {
+ struct mali_pma_slab *slab = NULL;
+ struct dma_buf *dma_buf;
+ struct dma_buf_attachment *dma_attachment;
+ struct sg_table *dma_sg_table;
+ bool succeeded = false;
+
+ /* Ensure the slab list mutex is locked. */
+ lockdep_assert_held(&(mali_pma_dev->slab_mutex));
+
+ /* Allocate and initialize a Mali protected memory slab record. */
+ slab = devm_kzalloc(mali_pma_dev->dev, sizeof(*slab), GFP_KERNEL);
+ if (!slab) {
+ dev_err(mali_pma_dev->dev,
+ "Failed to allocate a Mali protected memory slab.\n");
+ goto out;
+ }
+ INIT_LIST_HEAD(&(slab->list_entry));
+
+ /* Allocate a DMA buffer. */
+ dma_buf = dma_heap_buffer_alloc(
+ mali_pma_dev->dma_heap, MALI_PMA_SLAB_SIZE, O_RDWR, 0);
+ if (IS_ERR(dma_buf)) {
+ dev_err(mali_pma_dev->dev,
+ "Failed to allocate a DMA buffer of size %d\n",
+ MALI_PMA_SLAB_SIZE);
+ goto out;
+ }
+ slab->dma_buf = dma_buf;
+
+ /* Attach the device to the DMA buffer. */
+ dma_attachment = dma_buf_attach(dma_buf, mali_pma_dev->dev);
+ if (IS_ERR(dma_attachment)) {
+ dev_err(mali_pma_dev->dev,
+ "Failed to attach the device to the DMA buffer\n");
+ goto out;
+ }
+ slab->dma_attachment = dma_attachment;
+
+ /* Map the DMA buffer into the attached device address space. */
+ dma_sg_table =
+ dma_buf_map_attachment(dma_attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(dma_sg_table)) {
+ dev_err(mali_pma_dev->dev, "Failed to map the DMA buffer\n");
+ goto out;
+ }
+ slab->dma_sg_table = dma_sg_table;
+ slab->base = page_to_phys(sg_page(dma_sg_table->sgl));
+
+ /* Add the slab to the slab list. */
+ list_add(&(slab->list_entry), &(mali_pma_dev->slab_list));
+
+ /* Mark that the slab was successfully added. */
+ succeeded = true;
+
+out:
+ /* Clean up on failure. */
+ if (!succeeded && (slab != NULL)) {
+ mali_pma_slab_remove(mali_pma_dev, slab);
+ slab = NULL;
+ }
+
+ return slab;
+}
+
+/**
+ * mali_pma_slab_remove - Remove and deallocate a slab
+ *
+ * Must be called with the slab list mutex locked.
+ *
+ * @mali_pma_dev: Mali protected memory allocator device.
+ * @slab: Slab to remove and deallocate.
+ */
+static void mali_pma_slab_remove(
+ struct mali_pma_dev *mali_pma_dev, struct mali_pma_slab *slab) {
+ /* Ensure the slab list mutex is locked. */
+ lockdep_assert_held(&(mali_pma_dev->slab_mutex));
+
+ /* Free the Mali protected memory slab allocation. */
+ if (slab->dma_sg_table) {
dma_buf_unmap_attachment(
- mali_pma->dma_attachment,
- mali_pma->dma_sg_table, DMA_BIDIRECTIONAL);
+ slab->dma_attachment,
+ slab->dma_sg_table, DMA_BIDIRECTIONAL);
}
- if (mali_pma->dma_attachment) {
- dma_buf_detach(mali_pma->dma_buf, mali_pma->dma_attachment);
+ if (slab->dma_attachment) {
+ dma_buf_detach(slab->dma_buf, slab->dma_attachment);
}
- if (mali_pma->dma_buf) {
- dma_buf_put(mali_pma->dma_buf);
+ if (slab->dma_buf) {
+ dma_buf_put(slab->dma_buf);
}
- devm_kfree(mali_pma_dev->dev, mali_pma);
+
+ /* Remove the slab from the slab list. */
+ list_del(&(slab->list_entry));
+
+ /* Deallocate the Mali protected memory slab record. */
+ devm_kfree(mali_pma_dev->dev, slab);
}
/**
@@ -223,6 +487,10 @@ static int protected_memory_allocator_probe(struct platform_device *pdev)
pma_dev = &(mali_pma_dev->pma_dev);
platform_set_drvdata(pdev, pma_dev);
+ /* Initialize the slab list. */
+ INIT_LIST_HEAD(&(mali_pma_dev->slab_list));
+ mutex_init(&(mali_pma_dev->slab_mutex));
+
/* Configure the Mali protected memory allocator. */
mali_pma_dev->dev = &(pdev->dev);
pma_dev->owner = THIS_MODULE;
@@ -281,6 +549,12 @@ static int protected_memory_allocator_remove(struct platform_device *pdev)
}
mali_pma_dev = container_of(pma_dev, struct mali_pma_dev, pma_dev);
+ /* Warn if there are any outstanding protected memory slabs. */
+ if (!list_empty(&(mali_pma_dev->slab_list))) {
+ dev_warn(&(pdev->dev),
+ "Some protected memory has been left allocated\n");
+ }
+
/* Disable protected mode for the GPU. */
ret = exynos_smc(
SMC_PROTECTION_SET, 0, PROT_G3D, SMC_PROTECTION_DISABLE);