[PATCH v3] ARM64: mmu: implement mmu_disable completely in assembly
From: Ahmad Fatoum @ 2025-12-16 10:52 UTC
To: barebox; +Cc: Ahmad Fatoum
Splitting mmu_disable into two noinline functions on a RK3568 leads to a
barebox crash, because the code implicitly assumes that the compiler
won't generate memory accesses, including spills to the stack.

We can't guarantee this in C code, so implement the procedure in
assembly.
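
For illustration only (hypothetical codegen, not dumped from the failing
RK3568 build), the compiler is free to emit something like this around
the noinline call:

	str	x19, [sp, #-16]!	/* spill a live value to the stack */
	bl	v8_flush_dcache_all
	ldr	x19, [sp], #16		/* reload it after the call */

Once SCTLR_ELx.C is clear, that store goes straight to memory, but a
dirty cache line covering the same stack address can still be written
back by the ongoing flush, overwriting the spilled value before the
reload sees it.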
While at it, drop mmu_early_disable(), which is unused, superfluous, and
suffers from the same issue as the old mmu_disable().
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
v2 -> v3:
- use switch_el (Sascha)
- fix typo: s/prologue/epilogue/ at the end
v1 -> v2:
- fix copy-paste problem in some ELs (Ulrich)
---
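For readers unfamiliar with it: switch_el reads CurrentEL and branches
to one of three labels depending on the current exception level. A
minimal sketch of the idea, assuming a definition along these lines
(the actual barebox macro lives in asm/assembler64.h and may differ):

	.macro switch_el, xreg, el3_label, el2_label, el1_label
	mrs	\xreg, CurrentEL	/* current EL is in bits [3:2] */
	cmp	\xreg, #0xc		/* 0b1100 -> EL3 */
	b.eq	\el3_label
	cmp	\xreg, #0x8		/* 0b1000 -> EL2 */
	b.eq	\el2_label
	b	\el1_label		/* otherwise EL1 */
	.endm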
arch/arm/cpu/cache-armv8.S | 50 ++++++++++++++++++++++++++++++++++++
arch/arm/cpu/mmu_64.c | 27 +------------------
arch/arm/include/asm/cache.h | 1 +
arch/arm/include/asm/mmu.h | 1 -
4 files changed, 52 insertions(+), 27 deletions(-)
diff --git a/arch/arm/cpu/cache-armv8.S b/arch/arm/cpu/cache-armv8.S
index 9d9e0fb585a1..e6f50886f924 100644
--- a/arch/arm/cpu/cache-armv8.S
+++ b/arch/arm/cpu/cache-armv8.S
@@ -9,6 +9,8 @@
#include <linux/linkage.h>
#include <init.h>
+#include <asm/system.h>
+#include <asm/assembler64.h>
/*
* void v8_flush_dcache_level(level)
@@ -120,6 +122,54 @@ ENTRY(v8_invalidate_dcache_all)
ret
ENDPROC(v8_invalidate_dcache_all)
+/*
+ * void v8_mmu_disable(void)
+ *
+ * Implements the equivalent of the following C code:
+ *
+ * set_cr(get_cr() & ~(CR_M | CR_C))
+ * v8_flush_dcache_all();
+ * tlb_invalidate();
+ *
+ * dsb();
+ * isb();
+ *
+ * As the D$ needs to be cleaned after it has been disabled, this
+ * procedure is not permitted to trigger memory accesses, including
+ * spilling locals to the stack. It thus needs to be implemented
+ * in assembly.
+ */
+.section .text.v8_mmu_disable
+ENTRY(v8_mmu_disable)
+ mov x14, lr /* save lr: v8_flush_dcache_all clobbers it and the stack is off-limits */
+ mov x1, #~(CR_C | CR_M) /* mask clearing the D-cache and MMU enable bits */
+ switch_el x0, 3f, 2f, 1f /* dispatch on the current exception level */
+3: /* EL3 */
+ mrs x0, sctlr_el3
+ and x0, x0, x1
+ msr sctlr_el3, x0
+ bl v8_flush_dcache_all
+ tlbi alle3
+ b 0f
+2: /* EL2 */
+ mrs x0, sctlr_el2
+ and x0, x0, x1
+ msr sctlr_el2, x0
+ bl v8_flush_dcache_all
+ tlbi alle2
+ b 0f
+1: /* EL1 */
+ mrs x0, sctlr_el1
+ and x0, x0, x1
+ msr sctlr_el1, x0
+ bl v8_flush_dcache_all
+ tlbi vmalle1
+0: /* common epilogue */
+ dsb sy
+ isb
+ ret x14 /* return via lr saved in x14 */
+ENDPROC(v8_mmu_disable)
+
/*
* void v8_flush_dcache_range(start, end)
*
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index f22fcb5f8ea4..56c6a21f2b2a 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -344,17 +344,7 @@ void __mmu_init(bool mmu_on)
void mmu_disable(void)
{
- unsigned int cr;
-
- cr = get_cr();
- cr &= ~(CR_M | CR_C);
-
- set_cr(cr);
- v8_flush_dcache_all();
- tlb_invalidate();
-
- dsb();
- isb();
+ v8_mmu_disable();
}
void dma_inv_range(void *ptr, size_t size)
@@ -436,18 +426,3 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
mmu_enable();
}
-
-void mmu_early_disable(void)
-{
- unsigned int cr;
-
- cr = get_cr();
- cr &= ~(CR_M | CR_C);
-
- set_cr(cr);
- v8_flush_dcache_all();
- tlb_invalidate();
-
- dsb();
- isb();
-}
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index dd022c1f23f2..ea78ae123aec 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -15,6 +15,7 @@ void v8_flush_dcache_all(void);
void v8_invalidate_dcache_all(void);
void v8_flush_dcache_range(unsigned long start, unsigned long end);
void v8_inv_dcache_range(unsigned long start, unsigned long end);
+void v8_mmu_disable(void);
static inline void icache_invalidate(void)
{
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6fd1ff0d122a..bcaa984a40ed 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -70,6 +70,5 @@ void __dma_flush_range(unsigned long, unsigned long);
void __dma_inv_range(unsigned long, unsigned long);
void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned long barebox_base);
-void mmu_early_disable(void);
#endif /* __ASM_MMU_H */
--
2.47.3
Re: [PATCH v3] ARM64: mmu: implement mmu_disable completely in assembly
From: Sascha Hauer @ 2025-12-18 8:13 UTC
To: barebox, Ahmad Fatoum
On Tue, 16 Dec 2025 11:52:40 +0100, Ahmad Fatoum wrote:
> Splitting mmu_disable into two noinline functions on a RK3568 leads to a
> barebox crash, because the code implicitly assumes that the compiler
> won't generate memory accesses, including spills to the stack.
>
> We can't guarantee this in C code, so implement the procedure in
> assembly.
>
> [...]
Applied, thanks!
[1/1] ARM64: mmu: implement mmu_disable completely in assembly
https://git.pengutronix.de/cgit/barebox/commit/?id=3d5c7a8e4afb (link may not be stable)
Best regards,
--
Sascha Hauer <s.hauer@pengutronix.de>