mail archive of the barebox mailing list
 help / color / mirror / Atom feed
From: Sascha Hauer <s.hauer@pengutronix.de>
To: BAREBOX <barebox@lists.infradead.org>
Subject: [PATCH 6/7] ARM: MMU64: map memory for barebox proper pagewise
Date: Fri, 13 Jun 2025 09:58:54 +0200	[thread overview]
Message-ID: <20250613-arm-mmu-xn-ro-v1-6-60f05c6e7b4b@pengutronix.de> (raw)
In-Reply-To: <20250613-arm-mmu-xn-ro-v1-0-60f05c6e7b4b@pengutronix.de>

Map the remainder of the memory explicitly with two-level page tables. This is
the region where barebox proper ends up. In barebox proper we'll remap the code
segments readonly/executable and the ro segments readonly/execute-never. For this
we need the memory to be mapped pagewise. We can't do the split from section-wise
mapping to pagewise mapping later, because that would require us to do
a break-before-make sequence, which we can't do while barebox proper is running
at the location being remapped.

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 2 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 440258fa767735a4537abd71030a5540813fc443..dc81c1da6add38b59b44a9a4e247ab51ebc2692e 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -10,6 +10,7 @@
 #include <init.h>
 #include <mmu.h>
 #include <errno.h>
+#include <range.h>
 #include <zero_page.h>
 #include <linux/sizes.h>
 #include <asm/memory.h>
@@ -172,6 +173,56 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	tlb_invalidate();
 }
 
+/*
+ * like create_sections(), but this one creates pages instead of sections
+ */
+static void create_pages(uint64_t phys, uint64_t size, uint64_t attr)
+{
+	uint64_t virt = phys;
+	uint64_t *ttb = get_ttb();
+	uint64_t block_size;
+	uint64_t block_shift;
+	uint64_t *pte;
+	uint64_t idx;
+	uint64_t addr;
+	uint64_t *table;
+	uint64_t type;
+	int level;
+
+	addr = virt;
+
+	attr &= ~PTE_TYPE_MASK;
+
+	size = PAGE_ALIGN(size);
+
+	while (size) {
+		table = ttb;
+		for (level = 0; level < 4; level++) {
+			block_shift = level2shift(level);
+			idx = (addr & level2mask(level)) >> block_shift;
+			block_size = (1ULL << block_shift);
+
+			pte = table + idx;
+
+			if (level == 3) {
+				type = PTE_TYPE_PAGE;
+				*pte = phys | attr | type;
+				addr += block_size;
+				phys += block_size;
+				size -= block_size;
+				break;
+			} else {
+				split_block(pte, level);
+			}
+
+			table = get_level_table(pte);
+		}
+
+	}
+
+	tlb_invalidate();
+}
+
 static size_t granule_size(int level)
 {
 	switch (level) {
@@ -410,6 +461,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 {
 	int el;
 	u64 optee_membase;
+	unsigned long barebox_size;
 	unsigned long ttb = arm_mem_ttb(membase + memsize);
 
 	if (get_cr() & CR_M)
@@ -432,12 +484,24 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 
 	early_remap_range(membase, memsize, MAP_CACHED);
 
-	if (optee_get_membase(&optee_membase))
+	if (optee_get_membase(&optee_membase)) {
                 optee_membase = membase + memsize - OPTEE_SIZE;
 
+		barebox_size = optee_membase - barebox_start;
+
+		create_pages(optee_membase - barebox_size, barebox_size,
+			     get_pte_attrs(ARCH_MAP_CACHED_RWX));
+	} else {
+		barebox_size = membase + memsize - barebox_start;
+
+		create_pages(membase + memsize - barebox_size, barebox_size,
+			     get_pte_attrs(ARCH_MAP_CACHED_RWX));
+	}
+
 	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
 
-	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
+			  MAP_CACHED);
 
 	mmu_enable();
 }

-- 
2.39.5




  parent reply	other threads:[~2025-06-13  9:09 UTC|newest]

Thread overview: 18+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-06-13  7:58 [PATCH 0/7] ARM: Map sections RO/XN Sascha Hauer
2025-06-13  7:58 ` [PATCH 1/7] memory: request RO data section as separate region Sascha Hauer
2025-06-13  9:15   ` Ahmad Fatoum
2025-06-13  7:58 ` [PATCH 2/7] ARM: pass barebox base to mmu_early_enable() Sascha Hauer
2025-06-13  9:17   ` Ahmad Fatoum
2025-06-13  7:58 ` [PATCH 3/7] ARM: mmu: move ARCH_MAP_WRITECOMBINE to header Sascha Hauer
2025-06-13  9:18   ` Ahmad Fatoum
2025-06-13  7:58 ` [PATCH 4/7] ARM: MMU: map memory for barebox proper pagewise Sascha Hauer
2025-06-13  9:23   ` Ahmad Fatoum
2025-06-13  7:58 ` [PATCH 5/7] ARM: MMU: map text segment ro and data segments execute never Sascha Hauer
2025-06-13 10:12   ` Ahmad Fatoum
2025-06-17 13:06     ` Sascha Hauer
2025-06-13 10:36   ` Ahmad Fatoum
2025-06-13  7:58 ` Sascha Hauer [this message]
2025-06-13 10:29   ` [PATCH 6/7] ARM: MMU64: map memory for barebox proper pagewise Ahmad Fatoum
2025-06-13  7:58 ` [PATCH 7/7] ARM: MMU64: map text segment ro and data segments execute never Sascha Hauer
2025-06-13 10:40   ` Ahmad Fatoum
2025-06-13 12:44 ` [PATCH 0/7] ARM: Map sections RO/XN Ahmad Fatoum

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250613-arm-mmu-xn-ro-v1-6-60f05c6e7b4b@pengutronix.de \
    --to=s.hauer@pengutronix.de \
    --cc=barebox@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox