mail archive of the barebox mailing list
 help / color / mirror / Atom feed
From: Sascha Hauer <s.hauer@pengutronix.de>
To: BAREBOX <barebox@lists.infradead.org>
Subject: [PATCH 2/4] ARM: MMU: drop forced pagewise mapping
Date: Mon, 23 Feb 2026 09:34:07 +0100	[thread overview]
Message-ID: <20260223-arm-mmu-v1-2-707d45f6f6e1@pengutronix.de> (raw)
In-Reply-To: <20260223-arm-mmu-v1-0-707d45f6f6e1@pengutronix.de>

We used to force pagewise mapping in the PBL because we couldn't break
a section into pages later when barebox is running from that area. We
now do the MMU setup for the barebox regions entirely in the PBL, so we
won't have to touch that again which makes the forced pagewise mapping
unnecessary. Remove it.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu-common.c    |  2 --
 arch/arm/cpu/mmu-common.h    |  2 --
 arch/arm/cpu/mmu_32.c        | 15 ++-------------
 arch/arm/cpu/mmu_64.c        | 11 ++---------
 arch/riscv/include/asm/mmu.h |  3 ---
 5 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 0300bb9bc6..b84485a276 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -18,8 +18,6 @@
 
 const char *map_type_tostr(maptype_t map_type)
 {
-	map_type &= ~ARCH_MAP_FLAG_PAGEWISE;
-
 	switch (map_type) {
 	case MAP_CACHED_RWX:		return "RWX";
 	case MAP_CACHED_RO:		return "RO";
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index 3a3590ebb5..59abc1d9c8 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -11,8 +11,6 @@
 #include <linux/sizes.h>
 #include <linux/bits.h>
 
-#define ARCH_MAP_FLAG_PAGEWISE	BIT(31)
-
 struct device;
 
 void dma_inv_range(void *ptr, size_t size);
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 074fd1b0ed..a5ac9a3ff9 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -344,7 +344,6 @@ static uint32_t get_pmd_flags(maptype_t map_type)
 static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size,
 			       maptype_t map_type)
 {
-	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	bool mmu_on;
 	u32 virt_addr = (u32)_virt_addr;
 	u32 pte_flags, pmd_flags;
@@ -372,7 +371,7 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 
 		if (size >= PGDIR_SIZE && pgdir_size_aligned &&
 		    IS_ALIGNED(phys_addr, PGDIR_SIZE) &&
-		    !pgd_type_table(*pgd) && !force_pages) {
+		    !pgd_type_table(*pgd)) {
 			/*
 			 * TODO: Add code to discard a page table and
 			 * replace it with a section
@@ -636,17 +635,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * at this early stage
 	 */
 	early_remap_range(membase, barebox_start - membase, MAP_CACHED_RWX);
-	/*
-	 * Map the remainder of the memory explicitly with two level page tables. This is
-	 * the place where barebox proper ends at. In barebox proper we'll remap the code
-	 * segments readonly/executable and the ro segments readonly/execute never. For this
-	 * we need the memory being mapped pagewise. We can't do the split up from section
-	 * wise mapping to pagewise mapping later because that would require us to do
-	 * a break-before-make sequence which we can't do when barebox proper is running
-	 * at the location being remapped.
-	 */
-	early_remap_range(barebox_start, barebox_size,
-			  MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+	early_remap_range(barebox_start, barebox_size, MAP_CACHED_RWX);
 	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
 			  MAP_CACHED_RWX);
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 2ed39abeb5..69d4b89dd8 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -195,7 +195,6 @@ static void split_block(uint64_t *pte, int level, bool bbm)
 static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
 			      maptype_t map_type, bool bbm)
 {
-	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	unsigned long attr = get_pte_attrs(map_type);
 	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
@@ -237,7 +236,7 @@ static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
 				        IS_ALIGNED(addr, block_size) &&
 				        IS_ALIGNED(phys, block_size);
 
-			if ((force_pages && level == 3) || (!force_pages && block_aligned)) {
+			if (block_aligned) {
 				type = (level == 3) ?
 					PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
 
@@ -411,13 +410,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 
 	barebox_size = optee_membase - barebox_start;
 
-	/*
-	 * map barebox area using pagewise mapping. We want to modify the XN/RO
-	 * attributes later, but can't switch from sections to pages later when
-	 * executing code from it
-	 */
-	early_remap_range(barebox_start, barebox_size,
-		     MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+	early_remap_range(barebox_start, barebox_size, MAP_CACHED_RWX);
 
 	/* OP-TEE might be at location specified in OP-TEE header */
 	optee_get_membase(&optee_membase);
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 98af92cc17..cdc599bd51 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -15,9 +15,6 @@
 #define ARCH_HAS_REMAP
 #define MAP_ARCH_DEFAULT MAP_CACHED
 
-/* Architecture-specific memory type flags */
-#define ARCH_MAP_FLAG_PAGEWISE		(1 << 16)	/* Force page-wise mapping */
-
 /*
  * Remap a virtual address range with specified memory type (barebox proper).
  * Used by the generic remap infrastructure after barebox is fully relocated.

-- 
2.47.3




  parent reply	other threads:[~2026-02-23  8:35 UTC|newest]

Thread overview: 8+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
2026-02-23  8:34 ` [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type Sascha Hauer
2026-02-23  8:34 ` Sascha Hauer [this message]
2026-02-23  8:34 ` [PATCH 3/4] ARM: pbl: MMU: drop unnecessary barebox mapping Sascha Hauer
2026-02-23  8:34 ` [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times Sascha Hauer
2026-02-23  8:41   ` Ahmad Fatoum
2026-02-23  9:10     ` Sascha Hauer
2026-02-23 15:32 ` (subset) [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260223-arm-mmu-v1-2-707d45f6f6e1@pengutronix.de \
    --to=s.hauer@pengutronix.de \
    --cc=barebox@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox