From: Sascha Hauer <s.hauer@pengutronix.de>
To: BAREBOX <barebox@lists.infradead.org>
Cc: "Claude Sonnet 4.5" <noreply@anthropic.com>
Subject: [PATCH v4 18/22] ARM: PBL: setup MMU with proper permissions from ELF segments
Date: Wed, 14 Jan 2026 13:14:43 +0100
Message-ID: <20260114-pbl-load-elf-v4-18-0afa78d95a7e@pengutronix.de>
In-Reply-To: <20260114-pbl-load-elf-v4-0-0afa78d95a7e@pengutronix.de>

Move complete MMU setup into PBL by leveraging ELF segment information
to apply correct memory permissions before jumping to barebox proper.

After ELF relocation, parse the PT_LOAD segments and map each one with
permissions derived from its p_flags (see the sketch after the list):
- Text segments (PF_R|PF_X): Read-only + executable (MAP_CODE)
- Data segments (PF_R|PF_W): Read-write (MAP_CACHED)
- RO data segments (PF_R): Read-only (MAP_CACHED_RO)
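
For illustration, the translation boils down to the following (condensed
from the elf_flags_to_mmu_flags() helper added in pbl/mmu.c below;
segments without PF_R are unexpected and fall back to an uncached
mapping):

  if ((p_flags & PF_R) && (p_flags & PF_W))
          return MAP_CACHED;       /* data, bss */
  else if ((p_flags & PF_R) && (p_flags & PF_X))
          return MAP_CODE;         /* text */
  else if (p_flags & PF_R)
          return MAP_CACHED_RO;    /* rodata */
  else
          return MAP_UNCACHED;     /* no PF_R: unusual, map uncached */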

This ensures barebox proper starts with full W^X protection already
in place, eliminating the need for complex remapping later on.
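
In the PBL entry path this amounts to a single call right after the ELF
image has been relocated (condensed from the uncompress.c hunk below):

  ret = pbl_mmu_setup_from_elf(&elf, membase, memsize);
  if (ret)
          panic("Failed to setup MMU from ELF: %d\n", ret);

  barebox = (void *)(unsigned long)elf.entry;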

The framework is portable - common ELF parsing in pbl/mmu.c uses
architecture-specific early_remap_range() exported from mmu_*.c.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu-common.c |  29 ++++++-------
 arch/arm/cpu/uncompress.c |  10 +++++
 include/pbl/mmu.h         |  38 ++++++++++++++++
 pbl/Makefile              |   1 +
 pbl/mmu.c                 | 107 ++++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 170 insertions(+), 15 deletions(-)

diff --git a/arch/arm/cpu/mmu-common.c b/arch/arm/cpu/mmu-common.c
index 67317f127cadb138cc2e85bb18c92ab47bc1206f..3a00358d6058b42584ccdbafd398b0ed6229b999 100644
--- a/arch/arm/cpu/mmu-common.c
+++ b/arch/arm/cpu/mmu-common.c
@@ -109,28 +109,26 @@ static inline void remap_range_end(unsigned long start, unsigned long end,
 	remap_range((void *)start, end - start, map_type);
 }
 
-static inline void remap_range_end_sans_text(unsigned long start, unsigned long end,
+static inline void remap_range_end_sans_image(unsigned long start, unsigned long end,
 					     unsigned map_type)
 {
-	unsigned long text_start = (unsigned long)&_stext;
-	unsigned long text_end = (unsigned long)&_etext;
+	unsigned long image_start = (unsigned long)&__image_start;
+	unsigned long image_end = (unsigned long)&__image_end;
 
-	if (region_overlap_end_exclusive(start, end, text_start, text_end)) {
-		remap_range_end(start, text_start, MAP_CACHED);
+	if (region_overlap_end_exclusive(start, end, image_start, image_end)) {
+		remap_range_end(start, image_start, MAP_CACHED);
 		/* skip barebox segments here, will be mapped later */
-		start = text_end;
+		start = image_end;
 	}
 
+	start = ALIGN(start, PAGE_SIZE);
+
 	remap_range_end(start, end, MAP_CACHED);
 }
 
 static void mmu_remap_memory_banks(void)
 {
 	struct memory_bank *bank;
-	unsigned long code_start = (unsigned long)&_stext;
-	unsigned long code_size = (unsigned long)&__start_rodata - (unsigned long)&_stext;
-	unsigned long rodata_start = (unsigned long)&__start_rodata;
-	unsigned long rodata_size = (unsigned long)&__end_rodata - rodata_start;
 
 	/*
 	 * Early mmu init will have mapped everything but the initial memory area
@@ -138,6 +136,10 @@ static void mmu_remap_memory_banks(void)
 	 * all memory banks, so let's map all pages, excluding reserved memory areas
 	 * and barebox text area cacheable.
 	 *
+	 * PBL has already set up the MMU with proper permissions for text, data
+	 * and rodata based on ELF segment information, so we don't need to remap
+	 * those here.
+	 *
 	 * This code will become much less complex once we switch over to using
 	 * CONFIG_MEMORY_ATTRIBUTES for MMU as well.
 	 */
@@ -150,16 +152,13 @@ static void mmu_remap_memory_banks(void)
 		/* Skip reserved regions */
 		for_each_reserved_region(bank, rsv) {
 			if (pos != rsv->start)
-				remap_range_end_sans_text(pos, rsv->start, MAP_CACHED);
+				remap_range_end_sans_image(pos, rsv->start, MAP_CACHED);
 			pos = rsv->end + 1;
 		}
 
-		remap_range_end_sans_text(pos, bank->res->end + 1, MAP_CACHED);
+		remap_range_end_sans_image(pos, bank->res->end + 1, MAP_CACHED);
 	}
 
-	remap_range((void *)code_start, code_size, MAP_CODE);
-	remap_range((void *)rodata_start, rodata_size, MAP_CACHED_RO);
-
 	setup_trap_pages();
 }
 
diff --git a/arch/arm/cpu/uncompress.c b/arch/arm/cpu/uncompress.c
index 10df5fcba90b3d5f616a9d16e6bdc5120cf54e9d..9a9f391022c1c78d9652b3a177e591c31fe94246 100644
--- a/arch/arm/cpu/uncompress.c
+++ b/arch/arm/cpu/uncompress.c
@@ -21,6 +21,7 @@
 #include <asm/unaligned.h>
 #include <compressed-dtb.h>
 #include <elf.h>
+#include <pbl/mmu.h>
 
 #include <debug_ll.h>
 
@@ -103,6 +104,15 @@ void __noreturn barebox_pbl_start(unsigned long membase, unsigned long memsize,
 	if (ret)
 		panic("Failed to relocate ELF: %d\n", ret);
 
+	/*
+	 * Now that the ELF image is relocated, we know the exact addresses
+	 * of all segments. Set up MMU with proper permissions based on
+	 * ELF segment flags (PF_R/W/X).
+	 */
+	ret = pbl_mmu_setup_from_elf(&elf, membase, memsize);
+	if (ret)
+		panic("Failed to setup MMU from ELF: %d\n", ret);
+
 	barebox = (void *)(unsigned long)elf.entry;
 
 	handoff_data_move(handoff_data);
diff --git a/include/pbl/mmu.h b/include/pbl/mmu.h
new file mode 100644
index 0000000000000000000000000000000000000000..72537604e2ed52bb26ac70a5424008f1c4bbde90
--- /dev/null
+++ b/include/pbl/mmu.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PBL_MMU_H
+#define __PBL_MMU_H
+
+#include <linux/types.h>
+
+struct elf_image;
+
+/**
+ * pbl_mmu_setup_from_elf() - Configure MMU using ELF segment information
+ * @elf: ELF image structure from elf_open_binary_into()
+ * @membase: Base address of RAM
+ * @memsize: Size of RAM
+ *
+ * This function sets up the MMU with proper permissions based on ELF
+ * segment flags. It should be called after elf_load_inplace() has
+ * relocated the barebox image.
+ *
+ * Segment permissions are mapped as follows:
+ *   PF_R | PF_X  -> Read-only + executable (text)
+ *   PF_R | PF_W  -> Read-write (data, bss)
+ *   PF_R         -> Read-only (rodata)
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+#if IS_ENABLED(CONFIG_MMU)
+int pbl_mmu_setup_from_elf(struct elf_image *elf, unsigned long membase,
+			    unsigned long memsize);
+#else
+static inline int pbl_mmu_setup_from_elf(struct elf_image *elf,
+					 unsigned long membase,
+					 unsigned long memsize)
+{
+	return 0;
+}
+#endif
+
+#endif /* __PBL_MMU_H */
diff --git a/pbl/Makefile b/pbl/Makefile
index f66391be7b2898388425657f54afcd6e4c72e3db..b78124cdcd2a4690be11d5503006723252b4904f 100644
--- a/pbl/Makefile
+++ b/pbl/Makefile
@@ -9,3 +9,4 @@ pbl-$(CONFIG_HAVE_IMAGE_COMPRESSION) += decomp.o
 pbl-$(CONFIG_LIBFDT) += fdt.o
 pbl-$(CONFIG_PBL_CONSOLE) += console.o
 obj-pbl-y += handoff-data.o
+obj-pbl-$(CONFIG_MMU) += mmu.o
diff --git a/pbl/mmu.c b/pbl/mmu.c
new file mode 100644
index 0000000000000000000000000000000000000000..9227a83b48163e4cd90412c5ea85c7021ee25ab0
--- /dev/null
+++ b/pbl/mmu.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2025 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+
+#define pr_fmt(fmt) "pbl-mmu: " fmt
+
+#include <common.h>
+#include <elf.h>
+#include <mmu.h>
+#include <pbl/mmu.h>
+#include <asm/mmu.h>
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+/*
+ * Map ELF segment permissions (p_flags) to architecture MMU flags
+ */
+static unsigned int elf_flags_to_mmu_flags(u32 p_flags)
+{
+	bool readable = p_flags & PF_R;
+	bool writable = p_flags & PF_W;
+	bool executable = p_flags & PF_X;
+
+	if (readable && writable) {
+		/* Data, BSS: Read-write, cached, non-executable */
+		return MAP_CACHED;
+	} else if (readable && executable) {
+		/* Text: Read-only, cached, executable */
+		return MAP_CODE;
+	} else if (readable) {
+		/* Read-only data: Read-only, cached, non-executable */
+		return MAP_CACHED_RO;
+	} else {
+		/*
+		 * Unusual: segment with no read permission.
+		 * Map as uncached, non-executable for safety.
+		 */
+		pr_warn("Segment with unusual permissions: flags=0x%x\n", p_flags);
+		return MAP_UNCACHED;
+	}
+}
+
+int pbl_mmu_setup_from_elf(struct elf_image *elf, unsigned long membase,
+			    unsigned long memsize)
+{
+	void *phdr;
+	int i = -1;
+
+	pr_debug("Setting up MMU from ELF segments\n");
+	pr_debug("ELF loaded at: 0x%p - 0x%p\n", elf->low_addr, elf->high_addr);
+
+	/*
+	 * Iterate through all PT_LOAD segments and set up MMU permissions
+	 * based on the segment's p_flags
+	 */
+	elf_for_each_segment(phdr, elf, elf->hdr_buf) {
+		i++;
+
+		if (elf_phdr_p_type(elf, phdr) != PT_LOAD)
+			continue;
+
+		u64 p_vaddr = elf_phdr_p_vaddr(elf, phdr);
+		u64 p_memsz = elf_phdr_p_memsz(elf, phdr);
+		u32 p_flags = elf_phdr_p_flags(elf, phdr);
+
+		/*
+		 * Calculate actual address after relocation.
+		 * For ET_EXEC: reloc_offset is 0, use p_vaddr directly
+		 * For ET_DYN: reloc_offset adjusts virtual to actual address
+		 */
+		unsigned long addr = p_vaddr + elf->reloc_offset;
+		unsigned long size = p_memsz;
+		unsigned long segment_end = addr + size;
+
+		/* Validate segment is within available memory */
+		if (segment_end < addr || /* overflow check */
+		    addr < membase ||
+		    segment_end > membase + memsize) {
+			pr_err("Segment %d outside memory bounds\n", i);
+			return -EINVAL;
+		}
+
+		/* Validate alignment - warn and round if needed */
+		if (!IS_ALIGNED(size, PAGE_SIZE)) {
+			pr_debug("Segment %d not page-aligned, rounding\n", i);
+			size = ALIGN(size, PAGE_SIZE);
+		}
+
+		unsigned int mmu_flags = elf_flags_to_mmu_flags(p_flags);
+
+		pr_debug("Segment %d: addr=0x%08lx size=0x%08lx flags=0x%x [%c%c%c] -> mmu_flags=0x%x\n",
+			 i, addr, size, p_flags,
+			 (p_flags & PF_R) ? 'R' : '-',
+			 (p_flags & PF_W) ? 'W' : '-',
+			 (p_flags & PF_X) ? 'X' : '-',
+			 mmu_flags);
+
+		/*
+		 * Remap this segment with proper permissions.
+		 * Use page-wise mapping to allow different permissions for
+		 * different segments even if they're nearby.
+		 */
+		remap_range((void *)addr, size, mmu_flags);
+	}
+
+	pr_debug("MMU setup from ELF complete\n");
+	return 0;
+}

-- 
2.47.3

Thread overview: 25+ messages
2026-01-14 12:14 [PATCH v4 00/22] PBL: Add PBL ELF loading support with dynamic relocations Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 01/22] Makefile.compiler: add objcopy-option Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 02/22] elf: only accept images matching the native ELF_CLASS Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 03/22] elf: build for PBL as well Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 04/22] elf: add elf segment iterator Sascha Hauer
2026-01-14 13:45   ` Ahmad Fatoum
2026-01-14 12:14 ` [PATCH v4 05/22] elf: add dynamic relocation support Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 06/22] ARM: implement elf_apply_relocations() for ELF " Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 07/22] riscv: define generic relocate_image Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 08/22] riscv: implement elf_apply_relocations() for ELF relocation support Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 09/22] elf: implement elf_load_inplace() Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 10/22] elf: create elf_open_binary_into() Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 11/22] Makefile: add vmbarebox build target Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 12/22] PBL: allow to link ELF image into PBL Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 13/22] mmu: add MAP_CACHED_RO mapping type Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 14/22] ARM: drop arm_fixup_vectors() Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 15/22] ARM: linker script: create separate PT_LOAD segments for text, rodata, and data Sascha Hauer
2026-01-14 15:06   ` Ahmad Fatoum
2026-01-14 12:14 ` [PATCH v4 16/22] ARM: link ELF image into PBL Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 17/22] ARM: cleanup barebox proper entry Sascha Hauer
2026-01-14 12:14 ` Sascha Hauer [this message]
2026-01-14 12:14 ` [PATCH v4 19/22] riscv: linker script: create separate PT_LOAD segments for text, rodata, and data Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 20/22] riscv: link ELF image into PBL Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 21/22] riscv: Allwinner D1: Drop M-Mode Sascha Hauer
2026-01-14 12:14 ` [PATCH v4 22/22] riscv: add ELF segment-based memory protection with MMU Sascha Hauer
