mail archive of the barebox mailing list
 help / color / mirror / Atom feed
* [PATCH 0/4] ARM: MMU: Some cleanup after recent changes
@ 2026-02-23  8:34 Sascha Hauer
  2026-02-23  8:34 ` [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type Sascha Hauer
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  8:34 UTC (permalink / raw)
  To: BAREBOX; +Cc: Sascha Hauer, Claude Sonnet 4.5

We now setup the MMU for barebox proper in the PBL already. This makes
some quirks we currently do in the PBL unnecessary. Remove them for
good. Also, make mmu_early_enable() safe to be called from board code
if necessary.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
Sascha Hauer (4):
      mmu: add MAP_CACHED_RWX mapping type
      ARM: MMU: drop forced pagewise mapping
      ARM: pbl: MMU: drop unnecessary barebox mapping
      ARM: pbl: MMU: Make it callable multiple times

 arch/arm/cpu/mmu-common.c    |  4 +---
 arch/arm/cpu/mmu-common.h    |  6 +-----
 arch/arm/cpu/mmu_32.c        | 27 ++++++++-------------------
 arch/arm/cpu/mmu_64.c        | 27 +++++++++------------------
 arch/arm/cpu/uncompress.c    |  2 +-
 arch/arm/include/asm/mmu.h   |  2 +-
 arch/riscv/cpu/mmu.c         |  6 +++---
 arch/riscv/include/asm/mmu.h |  4 ----
 include/mmu.h                |  1 +
 9 files changed, 25 insertions(+), 54 deletions(-)
---
base-commit: ffd4db2ea98ea1b8713382ca3d7a8ea7dadca4da
change-id: 20260223-arm-mmu-880be8cbf616

Best regards,
-- 
Sascha Hauer <s.hauer@pengutronix.de>




^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type
  2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
@ 2026-02-23  8:34 ` Sascha Hauer
  2026-02-23  8:34 ` [PATCH 2/4] ARM: MMU: drop forced pagewise mapping Sascha Hauer
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  8:34 UTC (permalink / raw)
  To: BAREBOX; +Cc: Sascha Hauer, Claude Sonnet 4.5

From: Sascha Hauer <sascha@saschahauer.de>

ARCH_MAP_CACHED_RWX seems to be the typical mapping for PBL and is used
by ARM32, ARM64 and RiscV. Drop the ARCH_ prefix and move it to the
generic mapping types.

Signed-off-by: Claude Sonnet 4.5 <noreply@anthropic.com>
---
 arch/arm/cpu/mmu-common.c    |  2 +-
 arch/arm/cpu/mmu-common.h    |  4 +---
 arch/arm/cpu/mmu_32.c        | 10 +++++-----
 arch/arm/cpu/mmu_64.c        |  8 ++++----
 arch/riscv/cpu/mmu.c         |  6 +++---
 arch/riscv/include/asm/mmu.h |  1 -
 include/mmu.h                |  1 +
 7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 44c39dc048..0300bb9bc6 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -21,7 +21,7 @@ const char *map_type_tostr(maptype_t map_type)
 	map_type &= ~ARCH_MAP_FLAG_PAGEWISE;
 
 	switch (map_type) {
-	case ARCH_MAP_CACHED_RWX:	return "RWX";
+	case MAP_CACHED_RWX:		return "RWX";
 	case MAP_CACHED_RO:		return "RO";
 	case MAP_CACHED:		return "CACHED";
 	case MAP_UNCACHED:		return "UNCACHED";
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index b42c421ffd..3a3590ebb5 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -11,8 +11,6 @@
 #include <linux/sizes.h>
 #include <linux/bits.h>
 
-#define ARCH_MAP_CACHED_RWX	MAP_ARCH(2)
-
 #define ARCH_MAP_FLAG_PAGEWISE	BIT(31)
 
 struct device;
@@ -32,7 +30,7 @@ static inline maptype_t arm_mmu_maybe_skip_permissions(maptype_t map_type)
 	case MAP_CODE:
 	case MAP_CACHED:
 	case MAP_CACHED_RO:
-		return ARCH_MAP_CACHED_RWX;
+		return MAP_CACHED_RWX;
 	default:
 		return map_type;
 	}
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 4e569677e1..074fd1b0ed 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -302,7 +302,7 @@ static uint32_t get_pte_flags(maptype_t map_type)
 {
 	if (cpu_architecture() >= CPU_ARCH_ARMv7) {
 		switch (map_type & MAP_TYPE_MASK) {
-		case ARCH_MAP_CACHED_RWX:
+		case MAP_CACHED_RWX:
 			return PTE_FLAGS_CACHED_V7_RWX;
 		case MAP_CACHED_RO:
 			return PTE_FLAGS_CACHED_RO_V7;
@@ -323,7 +323,7 @@ static uint32_t get_pte_flags(maptype_t map_type)
 		case MAP_CACHED_RO:
 		case MAP_CODE:
 			return PTE_FLAGS_CACHED_RO_V4;
-		case ARCH_MAP_CACHED_RWX:
+		case MAP_CACHED_RWX:
 		case MAP_CACHED:
 			return PTE_FLAGS_CACHED_V4;
 		case MAP_UNCACHED:
@@ -635,7 +635,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * map the bulk of the memory as sections to avoid allocating too many page tables
 	 * at this early stage
 	 */
-	early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX);
+	early_remap_range(membase, barebox_start - membase, MAP_CACHED_RWX);
 	/*
 	 * Map the remainder of the memory explicitly with two level page tables. This is
 	 * the place where barebox proper ends at. In barebox proper we'll remap the code
@@ -646,10 +646,10 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * at the location being remapped.
 	 */
 	early_remap_range(barebox_start, barebox_size,
-			  ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+			  MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX);
+			  MAP_CACHED_RWX);
 
 	__mmu_cache_on();
 }
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 20e185cf5e..2ed39abeb5 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -161,7 +161,7 @@ static unsigned long get_pte_attrs(maptype_t map_type)
 		return CACHED_MEM | PTE_BLOCK_RO;
 	case MAP_CACHED_RO:
 		return attrs_xn() | CACHED_MEM | PTE_BLOCK_RO;
-	case ARCH_MAP_CACHED_RWX:
+	case MAP_CACHED_RWX:
 		return CACHED_MEM;
 	default:
 		return ~0UL;
@@ -404,7 +404,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 */
 	early_init_range(2);
 
-	early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX);
+	early_remap_range(membase, memsize, MAP_CACHED_RWX);
 
 	/* Default location for OP-TEE: end of DRAM, leave OPTEE_SIZE space for it */
 	optee_membase = membase + memsize - OPTEE_SIZE;
@@ -417,7 +417,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * executing code from it
 	 */
 	early_remap_range(barebox_start, barebox_size,
-		     ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+		     MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
 
 	/* OP-TEE might be at location specified in OP-TEE header */
 	optee_get_membase(&optee_membase);
@@ -425,7 +425,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
 
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
-			  ARCH_MAP_CACHED_RWX);
+			  MAP_CACHED_RWX);
 
 	mmu_enable();
 }
diff --git a/arch/riscv/cpu/mmu.c b/arch/riscv/cpu/mmu.c
index bafd597b69..38120782ab 100644
--- a/arch/riscv/cpu/mmu.c
+++ b/arch/riscv/cpu/mmu.c
@@ -125,14 +125,14 @@ static unsigned long flags_to_pte(maptype_t flags)
 
 	/*
 	 * Map barebox memory types to RISC-V PTE flags:
-	 * - ARCH_MAP_CACHED_RWX: read + write + execute (early boot, full RAM access)
+	 * - MAP_CACHED_RWX: read + write + execute (early boot, full RAM access)
 	 * - MAP_CODE: read + execute (text sections)
 	 * - MAP_CACHED_RO: read only (rodata sections)
 	 * - MAP_CACHED: read + write (data/bss sections)
 	 * - MAP_UNCACHED: read + write, uncached (device memory)
 	 */
 	switch (flags & MAP_TYPE_MASK) {
-	case ARCH_MAP_CACHED_RWX:
+	case MAP_CACHED_RWX:
 		/* Full access for early boot: R + W + X */
 		pte |= PTE_R | PTE_W | PTE_X;
 		break;
@@ -287,7 +287,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize,
 	 */
 	pr_debug("Remapping RAM 0x%08lx-0x%08lx as cached RWX...\n", membase, end);
 	for (addr = membase; addr < end; addr += RISCV_L1_SIZE)
-		create_megapage(addr, addr, ARCH_MAP_CACHED_RWX);
+		create_megapage(addr, addr, MAP_CACHED_RWX);
 
 	pr_debug("Page table setup complete, used %lu KB\n",
 		 (early_pt_idx * RISCV_PGSIZE) / 1024);
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index ba7068c493..98af92cc17 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -16,7 +16,6 @@
 #define MAP_ARCH_DEFAULT MAP_CACHED
 
 /* Architecture-specific memory type flags */
-#define ARCH_MAP_CACHED_RWX		MAP_ARCH(2)	/* Cached, RWX (early boot) */
 #define ARCH_MAP_FLAG_PAGEWISE		(1 << 16)	/* Force page-wise mapping */
 
 /*
diff --git a/include/mmu.h b/include/mmu.h
index 9f582f25e1..d0143f360a 100644
--- a/include/mmu.h
+++ b/include/mmu.h
@@ -10,6 +10,7 @@
 #define MAP_FAULT		2
 #define MAP_CODE		3
 #define MAP_CACHED_RO		4
+#define MAP_CACHED_RWX		6
 
 #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
 #define MAP_WRITECOMBINE	5

-- 
2.47.3




^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 2/4] ARM: MMU: drop forced pagewise mapping
  2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
  2026-02-23  8:34 ` [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type Sascha Hauer
@ 2026-02-23  8:34 ` Sascha Hauer
  2026-02-23  8:34 ` [PATCH 3/4] ARM: pbl: MMU: drop unnecessary barebox mapping Sascha Hauer
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  8:34 UTC (permalink / raw)
  To: BAREBOX

We used to force pagewise mapping in the PBL because we couldn't break
a section into pages later when barebox is running from that area. We
now do the MMU setup for the barebox regions entirely in the PBL, so we
won't have to touch that again which makes the forced pagewise mapping
unnecessary. Remove it.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu-common.c    |  2 --
 arch/arm/cpu/mmu-common.h    |  2 --
 arch/arm/cpu/mmu_32.c        | 15 ++-------------
 arch/arm/cpu/mmu_64.c        | 11 ++---------
 arch/riscv/include/asm/mmu.h |  3 ---
 5 files changed, 4 insertions(+), 29 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 0300bb9bc6..b84485a276 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -18,8 +18,6 @@
 
 const char *map_type_tostr(maptype_t map_type)
 {
-	map_type &= ~ARCH_MAP_FLAG_PAGEWISE;
-
 	switch (map_type) {
 	case MAP_CACHED_RWX:		return "RWX";
 	case MAP_CACHED_RO:		return "RO";
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index 3a3590ebb5..59abc1d9c8 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -11,8 +11,6 @@
 #include <linux/sizes.h>
 #include <linux/bits.h>
 
-#define ARCH_MAP_FLAG_PAGEWISE	BIT(31)
-
 struct device;
 
 void dma_inv_range(void *ptr, size_t size);
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 074fd1b0ed..a5ac9a3ff9 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -344,7 +344,6 @@ static uint32_t get_pmd_flags(maptype_t map_type)
 static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size,
 			       maptype_t map_type)
 {
-	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	bool mmu_on;
 	u32 virt_addr = (u32)_virt_addr;
 	u32 pte_flags, pmd_flags;
@@ -372,7 +371,7 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 
 		if (size >= PGDIR_SIZE && pgdir_size_aligned &&
 		    IS_ALIGNED(phys_addr, PGDIR_SIZE) &&
-		    !pgd_type_table(*pgd) && !force_pages) {
+		    !pgd_type_table(*pgd)) {
 			/*
 			 * TODO: Add code to discard a page table and
 			 * replace it with a section
@@ -636,17 +635,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 * at this early stage
 	 */
 	early_remap_range(membase, barebox_start - membase, MAP_CACHED_RWX);
-	/*
-	 * Map the remainder of the memory explicitly with two level page tables. This is
-	 * the place where barebox proper ends at. In barebox proper we'll remap the code
-	 * segments readonly/executable and the ro segments readonly/execute never. For this
-	 * we need the memory being mapped pagewise. We can't do the split up from section
-	 * wise mapping to pagewise mapping later because that would require us to do
-	 * a break-before-make sequence which we can't do when barebox proper is running
-	 * at the location being remapped.
-	 */
-	early_remap_range(barebox_start, barebox_size,
-			  MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+	early_remap_range(barebox_start, barebox_size, MAP_CACHED_RWX);
 	early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
 	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
 			  MAP_CACHED_RWX);
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 2ed39abeb5..69d4b89dd8 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -195,7 +195,6 @@ static void split_block(uint64_t *pte, int level, bool bbm)
 static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
 			      maptype_t map_type, bool bbm)
 {
-	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	unsigned long attr = get_pte_attrs(map_type);
 	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
@@ -237,7 +236,7 @@ static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
 				        IS_ALIGNED(addr, block_size) &&
 				        IS_ALIGNED(phys, block_size);
 
-			if ((force_pages && level == 3) || (!force_pages && block_aligned)) {
+			if (block_aligned) {
 				type = (level == 3) ?
 					PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
 
@@ -411,13 +410,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 
 	barebox_size = optee_membase - barebox_start;
 
-	/*
-	 * map barebox area using pagewise mapping. We want to modify the XN/RO
-	 * attributes later, but can't switch from sections to pages later when
-	 * executing code from it
-	 */
-	early_remap_range(barebox_start, barebox_size,
-		     MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+	early_remap_range(barebox_start, barebox_size, MAP_CACHED_RWX);
 
 	/* OP-TEE might be at location specified in OP-TEE header */
 	optee_get_membase(&optee_membase);
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 98af92cc17..cdc599bd51 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -15,9 +15,6 @@
 #define ARCH_HAS_REMAP
 #define MAP_ARCH_DEFAULT MAP_CACHED
 
-/* Architecture-specific memory type flags */
-#define ARCH_MAP_FLAG_PAGEWISE		(1 << 16)	/* Force page-wise mapping */
-
 /*
  * Remap a virtual address range with specified memory type (barebox proper).
  * Used by the generic remap infrastructure after barebox is fully relocated.

-- 
2.47.3




^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 3/4] ARM: pbl: MMU: drop unnecessary barebox mapping
  2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
  2026-02-23  8:34 ` [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type Sascha Hauer
  2026-02-23  8:34 ` [PATCH 2/4] ARM: MMU: drop forced pagewise mapping Sascha Hauer
@ 2026-02-23  8:34 ` Sascha Hauer
  2026-02-23  8:34 ` [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times Sascha Hauer
  2026-02-23 15:32 ` (subset) [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
  4 siblings, 0 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  8:34 UTC (permalink / raw)
  To: BAREBOX

mmu_early_enable() sets up a mapping for the barebox proper regions.
This is no longer necessary as they are configured from the ELF binary
afterwards anyway, so drop it and remove the unnecessary arguments.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c      | 4 ++--
 arch/arm/cpu/mmu_64.c      | 7 +------
 arch/arm/cpu/uncompress.c  | 2 +-
 arch/arm/include/asm/mmu.h | 2 +-
 4 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index a5ac9a3ff9..cdd4d07826 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -606,10 +606,10 @@ void mmu_disable(void)
 	__mmu_cache_off();
 }
 
-void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned long barebox_start)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
 {
 	uint32_t *ttb = (uint32_t *)arm_mem_ttb(membase + memsize);
-	unsigned long barebox_size, optee_start;
+	unsigned long optee_start;
 
 	pr_debug("enabling MMU, ttb @ 0x%p\n", ttb);
 
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 69d4b89dd8..7f38473079 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -378,11 +378,10 @@ static void early_init_range(size_t total_level0_tables)
 	}
 }
 
-void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned long barebox_start)
+void mmu_early_enable(unsigned long membase, unsigned long memsize)
 {
 	int el;
 	u64 optee_membase;
-	unsigned long barebox_size;
 	unsigned long ttb = arm_mem_ttb(membase + memsize);
 
 	if (get_cr() & CR_M)
@@ -408,10 +407,6 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	/* Default location for OP-TEE: end of DRAM, leave OPTEE_SIZE space for it */
 	optee_membase = membase + memsize - OPTEE_SIZE;
 
-	barebox_size = optee_membase - barebox_start;
-
-	early_remap_range(barebox_start, barebox_size, MAP_CACHED_RWX);
-
 	/* OP-TEE might be at location specified in OP-TEE header */
 	optee_get_membase(&optee_membase);
 
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index dffdd2c812..38f7dbc113 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -85,7 +85,7 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
 	print_pbl_mem_layout(membase, endmem, barebox_base);
 #endif
 	if (IS_ENABLED(CONFIG_MMU))
-		mmu_early_enable(membase, memsize, barebox_base);
+		mmu_early_enable(membase, memsize);
 	else if (IS_ENABLED(CONFIG_ARMV7R_MPU))
 		set_cr(get_cr() | CR_C);
 
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index bcaa984a40..ce050babab 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -69,6 +69,6 @@ void __dma_clean_range(unsigned long, unsigned long);
 void __dma_flush_range(unsigned long, unsigned long);
 void __dma_inv_range(unsigned long, unsigned long);
 
-void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned long barebox_base);
+void mmu_early_enable(unsigned long membase, unsigned long memsize);
 
 #endif /* __ASM_MMU_H */

-- 
2.47.3




^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times
  2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
                   ` (2 preceding siblings ...)
  2026-02-23  8:34 ` [PATCH 3/4] ARM: pbl: MMU: drop unnecessary barebox mapping Sascha Hauer
@ 2026-02-23  8:34 ` Sascha Hauer
  2026-02-23  8:41   ` Ahmad Fatoum
  2026-02-23 15:32 ` (subset) [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
  4 siblings, 1 reply; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  8:34 UTC (permalink / raw)
  To: BAREBOX

mmu_early_enable() is called in barebox_pbl_start() after a call to
setup_c(). setup_c() clears the BSS which means alloc_pte() starts at
index zero again when mmu_early_enable() has been called earlier
already.

Some boards might want to do exactly that: Call mmu_early_enable()
earlier to do for example otherwise expensive hashing of xloaded
binaries.

Make mmu_early_enable() safe to be called multiple times by moving the
PTE index out of the BSS.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 7f38473079..2f451b3c91 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -86,7 +86,10 @@ static void set_pte_range(unsigned level, uint64_t *virt, phys_addr_t phys,
 #ifdef __PBL__
 static uint64_t *alloc_pte(void)
 {
-	static unsigned int idx;
+	static int idx = -1;
+
+	if (idx == -1)
+		idx = 0;
 
 	idx++;
 

-- 
2.47.3




^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times
  2026-02-23  8:34 ` [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times Sascha Hauer
@ 2026-02-23  8:41   ` Ahmad Fatoum
  2026-02-23  9:10     ` Sascha Hauer
  0 siblings, 1 reply; 8+ messages in thread
From: Ahmad Fatoum @ 2026-02-23  8:41 UTC (permalink / raw)
  To: Sascha Hauer, BAREBOX

Hello Sascha,

On 2/23/26 9:34 AM, Sascha Hauer wrote:
> mmu_early_enable() is called in barebox_pbl_start() after a call to
> setup_c(). setup_c() clears the BSS which means alloc_pte() starts at
> index zero again when mmu_early_enable() has been called earlier
> already.
> 
> Some boards might want to do exactly that: Call mmu_early_enable()
> earlier to do for example otherwise expensive hashing of xloaded
> binaries.
> 
> Make mmu_early_enable() safe to be called multiple times by moving the
> PTE index out of the BSS.

How about making setup_c() safe for calling multiple times by having it
check and modify a variable in the .data section?

Would that break anything?

Cheers,
Ahmad

> 
> Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> ---
>  arch/arm/cpu/mmu_64.c | 5 ++++-
>  1 file changed, 4 insertions(+), 1 deletion(-)
> 
> diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
> index 7f38473079..2f451b3c91 100644
> --- a/arch/arm/cpu/mmu_64.c
> +++ b/arch/arm/cpu/mmu_64.c
> @@ -86,7 +86,10 @@ static void set_pte_range(unsigned level, uint64_t *virt, phys_addr_t phys,
>  #ifdef __PBL__
>  static uint64_t *alloc_pte(void)
>  {
> -	static unsigned int idx;
> +	static int idx = -1;
> +
> +	if (idx == -1)
> +		idx = 0;
>  
>  	idx++;
>  
> 

-- 
Pengutronix e.K.                  |                             |
Steuerwalder Str. 21              | http://www.pengutronix.de/  |
31137 Hildesheim, Germany         | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686  | Fax:   +49-5121-206917-5555 |




^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times
  2026-02-23  8:41   ` Ahmad Fatoum
@ 2026-02-23  9:10     ` Sascha Hauer
  0 siblings, 0 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23  9:10 UTC (permalink / raw)
  To: Ahmad Fatoum; +Cc: BAREBOX

On Mon, Feb 23, 2026 at 09:41:23AM +0100, Ahmad Fatoum wrote:
> Hello Sascha,
> 
> On 2/23/26 9:34 AM, Sascha Hauer wrote:
> > mmu_early_enable() is called in barebox_pbl_start() after a call to
> > setup_c(). setup_c() clears the BSS which means alloc_pte() starts at
> > index zero again when mmu_early_enable() has been called earlier
> > already.
> > 
> > Some boards might want to do exactly that: Call mmu_early_enable()
> > earlier to do for example otherwise expensive hashing of xloaded
> > binaries.
> > 
> > Make mmu_early_enable() safe to be called multiple times by moving the
> > PTE index out of the BSS.
> 
> How about making setup_c() safe for calling multiple times by having it
> check and modify a variable in the .data section?

Good idea.

> 
> Would that break anything?

I don't know. We might have code that relies on exactly the current
behaviour, but I am not aware of any.

Sascha

> 
> Cheers,
> Ahmad
> 
> > 
> > Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> > ---
> >  arch/arm/cpu/mmu_64.c | 5 ++++-
> >  1 file changed, 4 insertions(+), 1 deletion(-)
> > 
> > diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
> > index 7f38473079..2f451b3c91 100644
> > --- a/arch/arm/cpu/mmu_64.c
> > +++ b/arch/arm/cpu/mmu_64.c
> > @@ -86,7 +86,10 @@ static void set_pte_range(unsigned level, uint64_t *virt, phys_addr_t phys,
> >  #ifdef __PBL__
> >  static uint64_t *alloc_pte(void)
> >  {
> > -	static unsigned int idx;
> > +	static int idx = -1;
> > +
> > +	if (idx == -1)
> > +		idx = 0;
> >  
> >  	idx++;
> >  
> > 
> 
> -- 
> Pengutronix e.K.                  |                             |
> Steuerwalder Str. 21              | http://www.pengutronix.de/  |
> 31137 Hildesheim, Germany         | Phone: +49-5121-206917-0    |
> Amtsgericht Hildesheim, HRA 2686  | Fax:   +49-5121-206917-5555 |
> 
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: (subset) [PATCH 0/4] ARM: MMU: Some cleanup after recent changes
  2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
                   ` (3 preceding siblings ...)
  2026-02-23  8:34 ` [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times Sascha Hauer
@ 2026-02-23 15:32 ` Sascha Hauer
  4 siblings, 0 replies; 8+ messages in thread
From: Sascha Hauer @ 2026-02-23 15:32 UTC (permalink / raw)
  To: BAREBOX, Sascha Hauer; +Cc: Sascha Hauer, Claude Sonnet 4.5


On Mon, 23 Feb 2026 09:34:05 +0100, Sascha Hauer wrote:
> We now setup the MMU for barebox proper in the PBL already. This makes
> some quirks we currently do in the PBL unnecessary. Remove them for
> good. Also, make mmu_early_enable() safe to be called from board code
> if necessary.
> 
> 

Applied, thanks!

[1/4] mmu: add MAP_CACHED_RWX mapping type
      https://git.pengutronix.de/cgit/barebox/commit/?id=06102b02b21a (link may not be stable)
[2/4] ARM: MMU: drop forced pagewise mapping
      https://git.pengutronix.de/cgit/barebox/commit/?id=72348878f2ab (link may not be stable)
[3/4] ARM: pbl: MMU: drop unnecessary barebox mapping
      https://git.pengutronix.de/cgit/barebox/commit/?id=ddb374ff808f (link may not be stable)

Best regards,
-- 
Sascha Hauer <s.hauer@pengutronix.de>




^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2026-02-23 15:32 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-23  8:34 [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer
2026-02-23  8:34 ` [PATCH 1/4] mmu: add MAP_CACHED_RWX mapping type Sascha Hauer
2026-02-23  8:34 ` [PATCH 2/4] ARM: MMU: drop forced pagewise mapping Sascha Hauer
2026-02-23  8:34 ` [PATCH 3/4] ARM: pbl: MMU: drop unnecessary barebox mapping Sascha Hauer
2026-02-23  8:34 ` [PATCH 4/4] ARM: pbl: MMU: Make it callable multiple times Sascha Hauer
2026-02-23  8:41   ` Ahmad Fatoum
2026-02-23  9:10     ` Sascha Hauer
2026-02-23 15:32 ` (subset) [PATCH 0/4] ARM: MMU: Some cleanup after recent changes Sascha Hauer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox