From: Sascha Hauer <s.hauer@pengutronix.de>
To: Barebox List <barebox@lists.infradead.org>
Subject: [PATCH 27/27] ARM: mmu64: Use two level pagetables in early code
Date: Fri, 12 May 2023 13:10:08 +0200	[thread overview]
Message-ID: <20230512111008.1120833-28-s.hauer@pengutronix.de> (raw)
In-Reply-To: <20230512111008.1120833-1-s.hauer@pengutronix.de>

So far the early MMU setup used 1GiB sized sections. This has the
disadvantage that we can't use the MMU in early code when we require a
finer granularity. Rockchip for example keeps TF-A code in the lower
memory, so the early code simply skipped MMU initialization there. We
also can't properly map the OP-TEE space at the end of SDRAM as
non-executable.

With this patch we switch to two-level page tables and can map with
4KiB granularity.
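
For reference, the 4KiB granularity follows directly from how an
AArch64 table walk with a 4KiB granule slices the virtual address:
9 index bits per level on top of the 12-bit page offset, i.e. 1GiB
blocks at level 1, 2MiB blocks at level 2 and 4KiB pages at level 3.
The standalone sketch below is not part of the patch and its helper
names are invented for illustration only; it just prints that
arithmetic:

#include <stdint.h>
#include <stdio.h>

/* With a 4KiB granule each table level resolves 9 VA bits on top of the
 * 12-bit page offset: level 1 -> 1GiB blocks, level 2 -> 2MiB blocks,
 * level 3 -> 4KiB pages. */
static unsigned int va_level_shift(int level)
{
	return 12 + 9 * (3 - level);
}

static uint64_t va_level_index(uint64_t va, int level)
{
	return (va >> va_level_shift(level)) & 0x1ff;
}

int main(void)
{
	uint64_t va = 0x40201000;	/* arbitrary example address */
	int level;

	for (level = 1; level <= 3; level++)
		printf("level %d: index %3llu, block size %llu KiB\n", level,
		       (unsigned long long)va_level_index(va, level),
		       (unsigned long long)((1ULL << va_level_shift(level)) / 1024));

	return 0;
}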

The MMU setup in barebox proper changes as well: instead of disabling
the MMU for reconfiguration, we now keep the MMU enabled and merely add
the mappings for SDRAM banks that were not known to the early code.
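
The per-bank loop added in the hunk below boils down to walking the
reserved regions of a bank in ascending order and mapping only the gaps
cached. The following standalone sketch restates that logic; the region
struct and the printing arch_remap_range() stub are simplified stand-ins,
not the real barebox definitions:

#include <stdint.h>
#include <stdio.h>

#define MAP_CACHED	1

struct region {
	uint64_t start;
	uint64_t end;		/* inclusive, like struct resource */
};

/* stub: the real function rewrites the page tables for [virt, virt + size) */
static void arch_remap_range(uint64_t virt, uint64_t size, unsigned int flags)
{
	printf("map 0x%010llx + 0x%09llx flags %u\n",
	       (unsigned long long)virt, (unsigned long long)size, flags);
}

/* Map a bank cached while skipping its reserved regions. The reserved
 * regions are assumed to be sorted and fully contained in the bank. */
static void remap_bank(const struct region *bank,
		       const struct region *rsv, int num_rsv)
{
	uint64_t pos = bank->start;
	int i;

	for (i = 0; i < num_rsv; i++) {
		arch_remap_range(pos, rsv[i].start - pos, MAP_CACHED);
		pos = rsv[i].end + 1;
	}

	arch_remap_range(pos, bank->end + 1 - pos, MAP_CACHED);
}

int main(void)
{
	struct region bank  = { 0x40000000, 0x7fffffff };	/* 1GiB of SDRAM */
	struct region rsv[] = { { 0x7c000000, 0x7dffffff } };	/* hypothetical carveout */

	remap_bank(&bank, rsv, 1);
	return 0;
}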

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 97 ++++++++++---------------------------------
 1 file changed, 21 insertions(+), 76 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 4b75be621d..3f9b52bbdb 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -22,7 +22,10 @@
 
 #include "mmu_64.h"
 
-static uint64_t *ttb;
+static uint64_t *get_ttb(void)
+{
+	return (uint64_t *)get_ttbr(current_el());
+}
 
 static void set_table(uint64_t *pt, uint64_t *table_addr)
 {
@@ -42,7 +45,7 @@ static uint64_t *alloc_pte(void)
 	if (idx * GRANULE_SIZE >= ARM_EARLY_PAGETABLE_SIZE)
 		return NULL;
 
-	return (void *)ttb + idx * GRANULE_SIZE;
+	return get_ttb() + idx * GRANULE_SIZE;
 }
 #else
 static uint64_t *alloc_pte(void)
@@ -63,7 +66,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
 	uint64_t idx;
 	int i;
 
-	pte = ttb;
+	pte = get_ttb();
 
 	for (i = 0; i < 4; i++) {
 		block_shift = level2shift(i);
@@ -112,6 +115,7 @@ static void split_block(uint64_t *pte, int level)
 static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 			    uint64_t attr)
 {
+	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
 	uint64_t block_shift;
 	uint64_t *pte;
@@ -121,9 +125,6 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	uint64_t type;
 	int level;
 
-	if (!ttb)
-		arm_mmu_not_initialized_error();
-
 	addr = virt;
 
 	attr &= ~PTE_TYPE_MASK;
@@ -192,37 +193,25 @@ static void mmu_enable(void)
 void __mmu_init(bool mmu_on)
 {
 	struct memory_bank *bank;
-	unsigned int el;
-
-	if (mmu_on)
-		mmu_disable();
-
-	ttb = alloc_pte();
-	el = current_el();
-	set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el, BITS_PER_VA),
-			  MEMORY_ATTRIBUTES);
 
-	pr_debug("ttb: 0x%p\n", ttb);
+	reserve_sdram_region("OP-TEE", 0xf0000000 - OPTEE_SIZE, OPTEE_SIZE);
 
-	/* create a flat mapping */
-	arch_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
-
-	/* Map sdram cached. */
 	for_each_memory_bank(bank) {
 		struct resource *rsv;
+		resource_size_t pos;
 
-		arch_remap_range((void *)bank->start, bank->size, MAP_CACHED);
+		pos = bank->start;
 
 		for_each_reserved_region(bank, rsv) {
-			arch_remap_range((void *)resource_first_page(rsv),
-					 resource_count_pages(rsv), MAP_UNCACHED);
+			arch_remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+			pos = rsv->end + 1;
 		}
+
+		arch_remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
 	}
 
 	/* Make zero page faulting to catch NULL pointer derefs */
 	zero_page_faulting();
-
-	mmu_enable();
 }
 
 void mmu_disable(void)
@@ -256,42 +245,6 @@ void dma_flush_range(void *ptr, size_t size)
 	v8_flush_dcache_range(start, end);
 }
 
-static void early_create_sections(void *ttb, uint64_t virt, uint64_t phys,
-				  uint64_t size, uint64_t attr)
-{
-	uint64_t block_size;
-	uint64_t block_shift;
-	uint64_t *pte;
-	uint64_t idx;
-	uint64_t addr;
-	uint64_t *table;
-
-	addr = virt;
-
-	attr &= ~PTE_TYPE_MASK;
-
-	table = ttb;
-
-	while (1) {
-		block_shift = level2shift(1);
-		idx = (addr & level2mask(1)) >> block_shift;
-		block_size = (1ULL << block_shift);
-
-		pte = table + idx;
-
-		*pte = phys | attr | PTE_TYPE_BLOCK;
-
-		if (size < block_size)
-			break;
-
-		addr += block_size;
-		phys += block_size;
-		size -= block_size;
-	}
-}
-
-#define EARLY_BITS_PER_VA 39
-
 void mmu_early_enable(unsigned long membase, unsigned long memsize)
 {
 	int el;
@@ -299,24 +252,16 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 
 	pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
 
-	/*
-	 * For the early code we only create level 1 pagetables which only
-	 * allow for a 1GiB granularity. If our membase is not aligned to that
-	 * bail out without enabling the MMU.
-	 */
-	if (membase & ((1ULL << level2shift(1)) - 1))
-		return;
+	el = current_el();
+	set_ttbr_tcr_mair(el, ttb, calc_tcr(el, BITS_PER_VA), MEMORY_ATTRIBUTES);
 
 	memset((void *)ttb, 0, GRANULE_SIZE);
 
-	el = current_el();
-	set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
-	early_create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
-			attrs_uncached_mem());
-	early_create_sections((void *)ttb, membase, membase, memsize - OPTEE_SIZE, CACHED_MEM);
-	tlb_invalidate();
-	isb();
-	set_cr(get_cr() | CR_M);
+	arch_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
+	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
+
+	mmu_enable();
 }
 
 void mmu_early_disable(void)
-- 
2.39.2




Thread overview: 41+ messages
2023-05-12 11:09 [PATCH 00/27] ARM: MMU rework Sascha Hauer
2023-05-12 11:09 ` [PATCH 01/27] ARM: fix scratch mem position with OP-TEE Sascha Hauer
2023-05-12 17:17   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 02/27] ARM: drop cache function initialization Sascha Hauer
2023-05-12 17:19   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 03/27] ARM: Add _32 suffix to aarch32 specific filenames Sascha Hauer
2023-05-12 17:21   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 04/27] ARM: cpu.c: remove unused include Sascha Hauer
2023-05-12 17:22   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 05/27] ARM: mmu-common.c: use common mmu include Sascha Hauer
2023-05-12 17:23   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 06/27] ARM: mmu32: rename mmu.h to mmu_32.h Sascha Hauer
2023-05-12 17:23   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 07/27] ARM: mmu: implement MAP_FAULT Sascha Hauer
2023-05-12 11:09 ` [PATCH 08/27] ARM: mmu64: Use arch_remap_range where possible Sascha Hauer
2023-05-12 17:40   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 09/27] ARM: mmu32: implement zero_page_*() Sascha Hauer
2023-05-12 11:09 ` [PATCH 10/27] ARM: i.MX: Drop HAB workaround Sascha Hauer
2023-05-12 18:09   ` Ahmad Fatoum
2023-05-16  8:23     ` Sascha Hauer
2023-05-12 11:09 ` [PATCH 11/27] ARM: Move early MMU after malloc initialization Sascha Hauer
2023-05-12 18:10   ` Ahmad Fatoum
2023-05-12 11:09 ` [PATCH 12/27] ARM: mmu: move dma_sync_single_for_device to extra file Sascha Hauer
2023-05-12 18:30   ` Ahmad Fatoum
2023-05-16  9:09     ` Sascha Hauer
2023-05-12 11:09 ` [PATCH 13/27] ARM: mmu: merge mmu-early_xx.c into mmu_xx.c Sascha Hauer
2023-05-12 11:09 ` [PATCH 14/27] ARM: mmu: alloc 64k for early page tables Sascha Hauer
2023-05-12 11:09 ` [PATCH 15/27] ARM: mmu32: create alloc_pte() Sascha Hauer
2023-05-12 11:09 ` [PATCH 16/27] ARM: mmu64: " Sascha Hauer
2023-05-12 11:09 ` [PATCH 17/27] ARM: mmu: drop ttb argument Sascha Hauer
2023-05-12 11:09 ` [PATCH 18/27] ARM: mmu: always do MMU initialization early when MMU is enabled Sascha Hauer
2023-05-12 11:10 ` [PATCH 19/27] ARM: mmu32: Assume MMU is on Sascha Hauer
2023-05-12 11:10 ` [PATCH 20/27] ARM: mmu32: Fix pmd_flags_to_pte() for ARMv4/5/6 Sascha Hauer
2023-05-12 11:10 ` [PATCH 21/27] ARM: mmu32: Add pte_flags_to_pmd() Sascha Hauer
2023-05-12 11:10 ` [PATCH 22/27] ARM: mmu32: add get_pte_flags, get_pmd_flags Sascha Hauer
2023-05-12 11:10 ` [PATCH 23/27] ARM: mmu32: move functions into c file Sascha Hauer
2023-05-12 11:10 ` [PATCH 24/27] ARM: mmu32: read TTB value from register Sascha Hauer
2023-05-12 11:10 ` [PATCH 25/27] ARM: mmu32: Use pages for early MMU setup Sascha Hauer
2023-05-12 11:10 ` [PATCH 26/27] ARM: mmu32: Skip reserved ranges during initialization Sascha Hauer
2023-05-12 11:10 ` Sascha Hauer [this message]
2023-05-16 10:55   ` [PATCH 27/27] ARM: mmu64: Use two level pagetables in early code Sascha Hauer
