path: root/lib/xlat_tables_v2
author    Varun Wadekar <vwadekar@nvidia.com>    2018-04-03 10:44:41 -0700
committer Varun Wadekar <vwadekar@nvidia.com>    2018-04-09 16:37:23 -0700
commit    0ed322329339a30c6bbd81ba784bb54966e4bd6c (patch)
tree      cd4aad4ed8f31815c127f56d7920d9d3ddc5b544 /lib/xlat_tables_v2
parent    f11916bf1dfecd7ee76240eebd847681ce366a67 (diff)
lib: xlat_tables_v2: reduce time required to add a mmap region
The last entry in the mapping table is not necessarily the same as the end of the table. This patch loops through the table to find the last entry marker on every new mmap addition. The memmove operation then only has to move the memory between the current entry and the last entry. For platforms that arrange their MMIO map properly, this operation turns out to be a NOP.

The previous implementation added significant overhead per mmap addition, as the memmove operation always moved the difference between the current mmap entry and the end of the table.

Tested on Tegra platforms; this new approach improves the memory mapping time by ~75%, significantly reducing boot time on some platforms.

Change-Id: Ie3478fa5942379282ef58bee2085da799137e2ca
Signed-off-by: Varun Wadekar <vwadekar@nvidia.com>
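To make the effect concrete, here is a minimal, self-contained C sketch -- not the TF-A code itself. The demo_region_t type, the move_len_* helpers, DEMO_MMAP_NUM and the table contents are invented for illustration; they reduce mmap_add_region_ctx() to the one detail this patch changes, namely how many bytes memmove() has to shift when a region is inserted.

/*
 * Illustrative sketch only -- NOT the TF-A implementation. demo_region_t,
 * the move_len_* helpers and the table contents are hypothetical stand-ins
 * for mmap_region_t and mmap_add_region_ctx().
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
        uintptr_t base_va;
        size_t size;    /* size == 0 marks an unused entry */
} demo_region_t;

#define DEMO_MMAP_NUM   16U

/* Old behaviour: always shift everything up to the end of the table. */
static size_t move_len_old(demo_region_t *table, demo_region_t *cursor)
{
        demo_region_t *end = table + DEMO_MMAP_NUM;

        return (size_t)((uintptr_t)end - (uintptr_t)cursor);
}

/* New behaviour: shift only up to the last used entry (first size == 0). */
static size_t move_len_new(demo_region_t *table, demo_region_t *cursor)
{
        const demo_region_t *end = table + DEMO_MMAP_NUM;
        demo_region_t *last = table;

        /* Find the last entry marker, as the patch does for ctx->mmap. */
        while ((last->size != 0U) && (last < end))
                ++last;

        assert(last->size == 0U);       /* the table is not full */

        return (size_t)((uintptr_t)last - (uintptr_t)cursor);
}

int main(void)
{
        /* One extra slot kept as a permanent zero "end of table" marker. */
        demo_region_t table[DEMO_MMAP_NUM + 1U] = {
                { 0x00000000U, 0x1000U },
                { 0x10000000U, 0x1000U },
                /* remaining entries are zero-initialised, i.e. unused */
        };
        /* Appending after the last used region: the well-ordered case. */
        demo_region_t *cursor = &table[2];

        printf("old memmove length: %zu bytes\n", move_len_old(table, cursor));
        printf("new memmove length: %zu bytes\n", move_len_new(table, cursor));

        return 0;
}

When regions are added in ascending virtual-address order the insertion cursor always lands on the first unused entry, so the new length is zero and the memmove becomes a no-op; this is the well-ordered case the commit message refers to, and is consistent with the ~75% mapping-time improvement reported above for Tegra.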
Diffstat (limited to 'lib/xlat_tables_v2')
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_internal.c  |  20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 653260cec..522b167d9 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -735,7 +735,8 @@ static int mmap_add_region_check(xlat_ctx_t *ctx, const mmap_region_t *mm)
 void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
 {
         mmap_region_t *mm_cursor = ctx->mmap;
-        mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
+        const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
+        mmap_region_t *mm_last;
         unsigned long long end_pa = mm->base_pa + mm->size - 1;
         uintptr_t end_va = mm->base_va + mm->size - 1;
         int ret;
@@ -786,6 +787,21 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
             && (mm_cursor->size < mm->size))
                 ++mm_cursor;
 
+        /*
+         * Find the last entry marker in the mmap
+         */
+        mm_last = ctx->mmap;
+        while ((mm_last->size != 0U) && (mm_last < mm_end)) {
+                ++mm_last;
+        }
+
+        /*
+         * Check if we have enough space in the memory mapping table.
+         * This shouldn't happen as we have checked in mmap_add_region_check
+         * that there is free space.
+         */
+        assert(mm_last->size == 0U);
+
         /* Make room for new region by moving other regions up by one place */
         memmove(mm_cursor + 1, mm_cursor,
                 (uintptr_t)mm_last - (uintptr_t)mm_cursor);
@@ -795,7 +811,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
          * This shouldn't happen as we have checked in mmap_add_region_check
          * that there is free space.
          */
-        assert(mm_last->size == 0);
+        assert(mm_end->size == 0U);
         *mm_cursor = *mm;
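As a usage note, the "platforms that arrange their MMIO map properly" case above corresponds to declaring the platform's region table in ascending base-address order, so that every addition appends after the last used entry. A hypothetical sketch follows: the addresses, sizes and the plat_demo_* names are invented, while MAP_REGION_FLAT(), mmap_add(), init_xlat_tables() and the MT_* attributes are the existing xlat_tables_v2 API.

#include <xlat_tables_v2.h>

/*
 * Hypothetical platform map, sorted by base address so that each
 * mmap_add_region_ctx() call appends at the tail of the table and the
 * memmove above moves zero bytes.
 */
static const mmap_region_t plat_demo_mmap[] = {
        MAP_REGION_FLAT(0x00000000, 0x00100000, MT_MEMORY | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(0x10000000, 0x00200000, MT_DEVICE | MT_RW | MT_SECURE),
        MAP_REGION_FLAT(0x50000000, 0x00010000, MT_DEVICE | MT_RW | MT_SECURE),
        {0} /* terminating entry */
};

/* Typically called from the platform's early setup code. */
void plat_demo_setup_xlat(void)
{
        mmap_add(plat_demo_mmap);
        init_xlat_tables();
}

Keeping the static region list sorted costs nothing at build time and, with this patch, keeps every mmap addition on the cheap append path.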