From: Sascha Hauer <s.hauer@pengutronix.de>
To: BAREBOX <barebox@lists.infradead.org>
Cc: "Claude Sonnet 4.5" <noreply@anthropic.com>
Subject: [PATCH 16/19] ARM: PBL: setup MMU with proper permissions from ELF segments
Date: Mon, 05 Jan 2026 12:26:57 +0100 [thread overview]
Message-ID: <20260105-pbl-load-elf-v1-16-e97853f98232@pengutronix.de> (raw)
In-Reply-To: <20260105-pbl-load-elf-v1-0-e97853f98232@pengutronix.de>
Move complete MMU setup into PBL by leveraging ELF segment information
to apply correct memory permissions before jumping to barebox proper.
After ELF relocation, parse PT_LOAD segments and map each with
permissions derived from p_flags:
- Text segments (PF_R|PF_X): Read-only + executable (MAP_CODE)
- Data segments (PF_R|PF_W): Read-write (MAP_CACHED)
- RO data segments (PF_R): Read-only (MAP_CACHED_RO)
This ensures barebox proper starts with full W^X protection already
in place, eliminating the need for complex remapping in barebox proper.
The mmu_init() function now only sets up trap pages for exception
handling.
The framework is portable - common ELF parsing in pbl/mmu.c uses
architecture-specific early_remap_range() exported from mmu_*.c.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
arch/arm/cpu/mmu-common.c | 64 ++------------------------
arch/arm/cpu/uncompress.c | 14 ++++++
include/pbl/mmu.h | 29 ++++++++++++
pbl/Makefile | 1 +
pbl/mmu.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 158 insertions(+), 61 deletions(-)
diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 3208139fdd24e89cf4c76e27477da23da169f164..3053abd2c7907baccc7f5686dd85de76591ad118 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -96,72 +96,14 @@ void zero_page_faulting(void)
remap_range(0x0, PAGE_SIZE, MAP_FAULT);
}
-/**
- * remap_range_end - remap a range identified by [start, end)
- *
- * @start: start of the range
- * @end: end of the first range (exclusive)
- * @map_type: mapping type to apply
- */
-static inline void remap_range_end(unsigned long start, unsigned long end,
- unsigned map_type)
-{
- remap_range((void *)start, end - start, map_type);
-}
-
-static inline void remap_range_end_sans_text(unsigned long start, unsigned long end,
- unsigned map_type)
-{
- unsigned long text_start = (unsigned long)&_stext;
- unsigned long text_end = (unsigned long)&_etext;
-
- if (region_overlap_end_exclusive(start, end, text_start, text_end)) {
- remap_range_end(start, text_start, MAP_CACHED);
- /* skip barebox segments here, will be mapped later */
- start = text_end;
- }
-
- remap_range_end(start, end, MAP_CACHED);
-}
-
static void mmu_remap_memory_banks(void)
{
- struct memory_bank *bank;
- unsigned long code_start = (unsigned long)&_stext;
- unsigned long code_size = (unsigned long)&__start_rodata - (unsigned long)&_stext;
- unsigned long rodata_start = (unsigned long)&__start_rodata;
- unsigned long rodata_size = (unsigned long)&__end_rodata - rodata_start;
-
/*
- * Early mmu init will have mapped everything but the initial memory area
- * (excluding final OPTEE_SIZE bytes) uncached. We have now discovered
- * all memory banks, so let's map all pages, excluding reserved memory areas
- * and barebox text area cacheable.
- *
- * This code will become much less complex once we switch over to using
- * CONFIG_MEMORY_ATTRIBUTES for MMU as well.
+ * PBL has already set up the MMU with proper permissions based on
+ * ELF segment information. We only need to set up trap pages for
+ * exception handling.
*/
- for_each_memory_bank(bank) {
- struct resource *rsv;
- resource_size_t pos;
-
- pos = bank->start;
-
- /* Skip reserved regions */
- for_each_reserved_region(bank, rsv) {
- if (pos != rsv->start)
- remap_range_end_sans_text(pos, rsv->start, MAP_CACHED);
- pos = rsv->end + 1;
- }
-
- remap_range_end_sans_text(pos, bank->start + bank->size, MAP_CACHED);
- }
-
- /* Do this while interrupt vectors are still writable */
setup_trap_pages();
-
- remap_range((void *)code_start, code_size, MAP_CODE);
- remap_range((void *)rodata_start, rodata_size, MAP_CACHED_RO);
}
static int mmu_init(void)
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index ccc3c5ae3ba60e990ee73715a49a316e2a14c44e..05f2efd48eeca58a820ac7fa4d8c6d8d3b763344 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -21,6 +21,7 @@
#include <asm/unaligned.h>
#include <compressed-dtb.h>
#include <elf.h>
+#include <pbl/mmu.h>
#include <debug_ll.h>
@@ -110,6 +111,19 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
pr_debug("ELF entry point: 0x%llx\n", elf.entry);
+ /*
+ * Now that the ELF image is relocated, we know the exact addresses
+ * of all segments. Set up MMU with proper permissions based on
+ * ELF segment flags (PF_R/W/X).
+ */
+ if (IS_ENABLED(CONFIG_MMU)) {
+ ret = pbl_mmu_setup_from_elf(&elf, membase, memsize);
+ if (ret) {
+ pr_err("Failed to setup MMU from ELF: %d\n", ret);
+ hang();
+ }
+ }
+
if (IS_ENABLED(CONFIG_THUMB2_BAREBOX))
barebox = (void *)(unsigned long)(elf.entry | 1);
else
diff --git a/include/pbl/mmu.h b/include/pbl/mmu.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a00d8e528ab5452981347185c9114235f213e2b
--- /dev/null
+++ b/include/pbl/mmu.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PBL_MMU_H
+#define __PBL_MMU_H
+
+#include <linux/types.h>
+
+struct elf_image;
+
+/**
+ * pbl_mmu_setup_from_elf() - Configure MMU using ELF segment information
+ * @elf: ELF image structure from elf_open_binary_into()
+ * @membase: Base address of RAM
+ * @memsize: Size of RAM
+ *
+ * This function sets up the MMU with proper permissions based on ELF
+ * segment flags. It should be called after elf_load_inplace() has
+ * relocated the barebox image.
+ *
+ * Segment permissions are mapped as follows:
+ * PF_R | PF_X -> Read-only + executable (text)
+ * PF_R | PF_W -> Read-write (data, bss)
+ * PF_R -> Read-only (rodata)
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int pbl_mmu_setup_from_elf(struct elf_image *elf, unsigned long membase,
+ unsigned long memsize);
+
+#endif /* __PBL_MMU_H */
diff --git a/pbl/Makefile b/pbl/Makefile
index f66391be7b2898388425657f54afcd6e4c72e3db..b78124cdcd2a4690be11d5503006723252b4904f 100644
--- a/pbl/Makefile
+++ b/pbl/Makefile
@@ -9,3 +9,4 @@ pbl-$(CONFIG_HAVE_IMAGE_COMPRESSION) += decomp.o
pbl-$(CONFIG_LIBFDT) += fdt.o
pbl-$(CONFIG_PBL_CONSOLE) += console.o
obj-pbl-y += handoff-data.o
+obj-pbl-$(CONFIG_MMU) += mmu.o
diff --git a/pbl/mmu.c b/pbl/mmu.c
new file mode 100644
index 0000000000000000000000000000000000000000..7a8f254a7bd67eccaab715832930c5d4134eb288
--- /dev/null
+++ b/pbl/mmu.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2025 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+
+#define pr_fmt(fmt) "pbl-mmu: " fmt
+
+#include <common.h>
+#include <elf.h>
+#include <mmu.h>
+#include <pbl/mmu.h>
+#include <asm/mmu.h>
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+/*
+ * Translate ELF program header permission bits (p_flags) into the
+ * mapping type understood by the architecture MMU code.
+ */
+static unsigned int elf_flags_to_mmu_flags(u32 p_flags)
+{
+	if (!(p_flags & PF_R)) {
+		/*
+		 * Unusual: segment with no read permission.
+		 * Map as uncached, non-executable for safety.
+		 */
+		pr_warn("Segment with unusual permissions: flags=0x%x\n", p_flags);
+		return MAP_UNCACHED;
+	}
+
+	/* Writable wins over executable: data/bss are mapped RW, never X */
+	if (p_flags & PF_W)
+		return MAP_CACHED;
+
+	/* Read-only + executable: text */
+	if (p_flags & PF_X)
+		return MAP_CODE;
+
+	/* Plain read-only data (rodata) */
+	return MAP_CACHED_RO;
+}
+
+int pbl_mmu_setup_from_elf(struct elf_image *elf, unsigned long membase,
+			   unsigned long memsize)
+{
+	void *phdr;
+	int i;
+	int phnum = elf_hdr_e_phnum(elf, elf->hdr_buf);
+	size_t phoff = elf_hdr_e_phoff(elf, elf->hdr_buf);
+	size_t phentsize = elf_size_of_phdr(elf);
+
+	pr_debug("Setting up MMU from ELF segments\n");
+	pr_debug("ELF entry point: 0x%llx\n", elf->entry);
+	pr_debug("ELF loaded at: 0x%p - 0x%p\n", elf->low_addr, elf->high_addr);
+
+	/*
+	 * Iterate through all PT_LOAD segments and set up MMU permissions
+	 * based on the segment's p_flags
+	 */
+	for (i = 0; i < phnum; i++) {
+		u64 p_vaddr, p_memsz;
+		u32 p_flags;
+		unsigned long addr, size, segment_end;
+		unsigned int mmu_flags;
+
+		phdr = elf->hdr_buf + phoff + i * phentsize;
+
+		if (elf_phdr_p_type(elf, phdr) != PT_LOAD)
+			continue;
+
+		p_vaddr = elf_phdr_p_vaddr(elf, phdr);
+		p_memsz = elf_phdr_p_memsz(elf, phdr);
+		p_flags = elf_phdr_p_flags(elf, phdr);
+
+		/*
+		 * Calculate actual address after relocation.
+		 * For ET_EXEC: reloc_offset is 0, use p_vaddr directly
+		 * For ET_DYN: reloc_offset adjusts virtual to actual address
+		 */
+		addr = p_vaddr + elf->reloc_offset;
+
+		/*
+		 * The MMU maps whole pages only. Expand the segment to page
+		 * boundaries (start rounded down, end rounded up) *before*
+		 * the bounds check, so the range actually handed to
+		 * pbl_remap_range() is the one that gets validated. Rounding
+		 * only the size, as done previously, left a misaligned start
+		 * address and an unvalidated rounded-up end.
+		 */
+		if (!IS_ALIGNED(addr, SZ_4K) || !IS_ALIGNED(p_memsz, SZ_4K))
+			pr_warn("Segment %d not page-aligned, rounding\n", i);
+
+		segment_end = ALIGN(addr + p_memsz, SZ_4K);
+		addr = ALIGN_DOWN(addr, SZ_4K);
+		size = segment_end - addr;
+
+		/* Validate the page-aligned segment is within available memory */
+		if (segment_end < addr || /* overflow check */
+		    addr < membase ||
+		    segment_end > membase + memsize) {
+			pr_err("Segment %d outside memory bounds\n", i);
+			return -EINVAL;
+		}
+
+		mmu_flags = elf_flags_to_mmu_flags(p_flags);
+
+		pr_debug("Segment %d: addr=0x%08lx size=0x%08lx flags=0x%x [%c%c%c] -> mmu_flags=0x%x\n",
+			 i, addr, size, p_flags,
+			 (p_flags & PF_R) ? 'R' : '-',
+			 (p_flags & PF_W) ? 'W' : '-',
+			 (p_flags & PF_X) ? 'X' : '-',
+			 mmu_flags);
+
+		/*
+		 * Remap this segment with proper permissions.
+		 * Use page-wise mapping to allow different permissions for
+		 * different segments even if they're nearby.
+		 */
+		pbl_remap_range((void *)addr, addr, size, mmu_flags);
+	}
+
+	pr_debug("MMU setup from ELF complete\n");
+	return 0;
+}
--
2.47.3
next prev parent reply other threads:[~2026-01-05 11:32 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-05 11:26 [PATCH 00/19] PBL: Add PBL ELF loading support with dynamic relocations Sascha Hauer
2026-01-05 11:26 ` [PATCH 01/19] elf: Use memcmp to make suitable for PBL Sascha Hauer
2026-01-05 11:46 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 02/19] elf: build for PBL as well Sascha Hauer
2026-01-05 11:26 ` [PATCH 03/19] elf: add dynamic relocation support Sascha Hauer
2026-01-05 14:05 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 04/19] ARM: implement elf_apply_relocations() for ELF " Sascha Hauer
2026-01-05 11:58 ` Ahmad Fatoum
2026-01-05 19:53 ` Sascha Hauer
2026-01-05 11:26 ` [PATCH 05/19] riscv: " Sascha Hauer
2026-01-05 11:26 ` [PATCH 06/19] elf: implement elf_load_inplace() Sascha Hauer
2026-01-05 13:37 ` Ahmad Fatoum
2026-01-05 22:42 ` Sascha Hauer
2026-01-06 8:18 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 07/19] elf: create elf_open_binary_into() Sascha Hauer
2026-01-05 11:26 ` [PATCH 08/19] Makefile: add barebox.elf build target Sascha Hauer
2026-01-05 12:22 ` Ahmad Fatoum
2026-01-05 15:43 ` Sascha Hauer
2026-01-05 17:11 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 09/19] PBL: allow to link ELF image into PBL Sascha Hauer
2026-01-05 12:11 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 10/19] mmu: add MAP_CACHED_RO mapping type Sascha Hauer
2026-01-05 12:14 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 11/19] mmu: introduce pbl_remap_range() Sascha Hauer
2026-01-05 12:15 ` Ahmad Fatoum
2026-01-06 8:50 ` Ahmad Fatoum
2026-01-06 9:25 ` Sascha Hauer
2026-01-05 11:26 ` [PATCH 12/19] ARM: use relative jumps in exception table Sascha Hauer
2026-01-05 11:44 ` Ahmad Fatoum
2026-01-05 12:29 ` Sascha Hauer
2026-01-05 12:31 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 13/19] ARM: exceptions: make in-binary exception table const Sascha Hauer
2026-01-05 11:26 ` [PATCH 14/19] ARM: linker script: create separate PT_LOAD segments for text, rodata, and data Sascha Hauer
2026-01-05 13:11 ` Ahmad Fatoum
2026-01-05 23:01 ` Sascha Hauer
2026-01-06 7:59 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 15/19] ARM: link ELF image into PBL Sascha Hauer
2026-01-05 12:27 ` Ahmad Fatoum
2026-01-05 11:26 ` Sascha Hauer [this message]
2026-01-05 12:58 ` [PATCH 16/19] ARM: PBL: setup MMU with proper permissions from ELF segments Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 17/19] riscv: link ELF image into PBL Sascha Hauer
2026-01-05 13:12 ` Ahmad Fatoum
2026-01-05 11:26 ` [PATCH 18/19] riscv: linker script: create separate PT_LOAD segments for text, rodata, and data Sascha Hauer
2026-01-05 13:40 ` Ahmad Fatoum
2026-01-05 11:27 ` [PATCH 19/19] riscv: add ELF segment-based memory protection with MMU Sascha Hauer
2026-01-05 13:58 ` Ahmad Fatoum
2026-01-05 14:08 ` [PATCH 00/19] PBL: Add PBL ELF loading support with dynamic relocations Ahmad Fatoum
2026-01-05 16:47 ` Sascha Hauer
2026-01-06 8:35 ` Ahmad Fatoum
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260105-pbl-load-elf-v1-16-e97853f98232@pengutronix.de \
--to=s.hauer@pengutronix.de \
--cc=barebox@lists.infradead.org \
--cc=noreply@anthropic.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox