mail archive of the barebox mailing list
 help / color / mirror / Atom feed
* [PATCH master 1/4] ARM: mmu64: request TTB region
@ 2023-05-26  6:33 Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 2/4] ARM: mmu64: define early_remap_range for mmu_early_enable usage Ahmad Fatoum
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Ahmad Fatoum @ 2023-05-26  6:33 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

ARM64 MMU code used to disable early MMU, reallocate TTB from malloc
area and then reenable it. This has recently been changed, so MMU is
left enabled like on ARM32, but unlike ARM32, the SDRAM region used in
PBL is not requested in barebox proper. Do that now.

Fixes: b53744ffe333 ("ARM: mmu64: Use two level pagetables in early code")
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index cdc482542202..1d5a5355c6be 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -192,8 +192,20 @@ static void mmu_enable(void)
  */
 void __mmu_init(bool mmu_on)
 {
+	uint64_t *ttb = get_ttb();
 	struct memory_bank *bank;
 
+	if (!request_sdram_region("ttb", (unsigned long)ttb,
+				  ARM_EARLY_PAGETABLE_SIZE))
+		/*
+		 * This can mean that:
+		 * - the early MMU code has put the ttb into a place
+		 *   which we don't have inside our available memory
+		 * - Somebody else has occupied the ttb region which means
+		 *   the ttb will get corrupted.
+		 */
+		pr_crit("Can't request SDRAM region for ttb at %p\n", ttb);
+
 	for_each_memory_bank(bank) {
 		struct resource *rsv;
 		resource_size_t pos;
-- 
2.39.2




^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH master 2/4] ARM: mmu64: define early_remap_range for mmu_early_enable usage
  2023-05-26  6:33 [PATCH master 1/4] ARM: mmu64: request TTB region Ahmad Fatoum
@ 2023-05-26  6:33 ` Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 3/4] ARM: mmu32: " Ahmad Fatoum
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Ahmad Fatoum @ 2023-05-26  6:33 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

Adding a new dma_inv_range/dma_flush_range into arch_remap_range before
MMU is enabled hangs, so let's define a new early_remap_range that should
always be safe to call while MMU is disabled. This is to prepare doing
cache maintenance in regular arch_remap_range in a later commit.

No functional change.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 1d5a5355c6be..940e0e914c43 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -159,23 +159,36 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	tlb_invalidate();
 }
 
-int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
+static unsigned long get_pte_attrs(unsigned flags)
 {
-	unsigned long attrs;
-
 	switch (flags) {
 	case MAP_CACHED:
-		attrs = CACHED_MEM;
-		break;
+		return CACHED_MEM;
 	case MAP_UNCACHED:
-		attrs = attrs_uncached_mem();
-		break;
+		return attrs_uncached_mem();
 	case MAP_FAULT:
-		attrs = 0x0;
-		break;
+		return 0x0;
 	default:
-		return -EINVAL;
+		return ~0UL;
 	}
+}
+
+static void early_remap_range(uint64_t addr, size_t size, unsigned flags)
+{
+	unsigned long attrs = get_pte_attrs(flags);
+
+	if (WARN_ON(attrs == ~0UL))
+		return;
+
+	create_sections(addr, addr, size, attrs);
+}
+
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
+{
+	unsigned long attrs = get_pte_attrs(flags);
+
+	if (attrs == ~0UL)
+		return -EINVAL;
 
 	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
 	return 0;
@@ -269,9 +282,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 
 	memset((void *)ttb, 0, GRANULE_SIZE);
 
-	remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
-	remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
-	remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
+	early_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
+	early_remap_range(membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	early_remap_range(membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
 
 	mmu_enable();
 }
-- 
2.39.2




^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH master 3/4] ARM: mmu32: define early_remap_range for mmu_early_enable usage
  2023-05-26  6:33 [PATCH master 1/4] ARM: mmu64: request TTB region Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 2/4] ARM: mmu64: define early_remap_range for mmu_early_enable usage Ahmad Fatoum
@ 2023-05-26  6:33 ` Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached Ahmad Fatoum
  2023-05-30 10:26 ` [PATCH master 1/4] ARM: mmu64: request TTB region Sascha Hauer
  3 siblings, 0 replies; 6+ messages in thread
From: Ahmad Fatoum @ 2023-05-26  6:33 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

Like done for ARM64, define early_remap_range, which should always be
safe to call while MMU is disabled. This is to prepare doing cache
maintenance in regular arch_remap_range.

No functional change.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 1c59225934ee..a324ebf71a55 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -241,7 +241,7 @@ static uint32_t get_pmd_flags(int map_type)
 	return pte_flags_to_pmd(get_pte_flags(map_type));
 }
 
-int arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
+static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
 {
 	u32 virt_addr = (u32)_virt_addr;
 	u32 pte_flags, pmd_flags;
@@ -318,6 +318,15 @@ int arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size, unsig
 	}
 
 	tlb_invalidate();
+}
+static void early_remap_range(u32 addr, size_t size, unsigned map_type)
+{
+	__arch_remap_range((void *)addr, addr, size, map_type);
+}
+
+int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
+{
+	__arch_remap_range(virt_addr, phys_addr, size, map_type);
 	return 0;
 }
 
@@ -580,9 +589,9 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 	create_flat_mapping();
 
 	/* maps main memory as cachable */
-	remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
-	remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
-	remap_range((void *)PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+	early_remap_range(membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	early_remap_range(membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_UNCACHED);
+	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
 
 	__mmu_cache_on();
 }
-- 
2.39.2




^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached
  2023-05-26  6:33 [PATCH master 1/4] ARM: mmu64: request TTB region Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 2/4] ARM: mmu64: define early_remap_range for mmu_early_enable usage Ahmad Fatoum
  2023-05-26  6:33 ` [PATCH master 3/4] ARM: mmu32: " Ahmad Fatoum
@ 2023-05-26  6:33 ` Ahmad Fatoum
  2023-05-26 12:49   ` Ahmad Fatoum
  2023-05-30 10:26 ` [PATCH master 1/4] ARM: mmu64: request TTB region Sascha Hauer
  3 siblings, 1 reply; 6+ messages in thread
From: Ahmad Fatoum @ 2023-05-26  6:33 UTC (permalink / raw)
  To: barebox; +Cc: Ahmad Fatoum

memtest can call remap_range to map regions being tested as uncached,
but remap_range did not take care to evict any stale cache lines.
Do this now.

This fixes an issue of SELFTEST_MMU failing on an i.MX8MN, when running
memtest on an uncached region that was previously memtested while being
cached.

Fixes: 3100ea146688 ("ARM: rework MMU support")
Fixes: 7cc98fbb6128 ("arm: cpu: add basic arm64 mmu support")
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c | 4 ++++
 arch/arm/cpu/mmu_64.c | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index a324ebf71a55..c4e5a3bb0ab2 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -327,6 +327,10 @@ static void early_remap_range(u32 addr, size_t size, unsigned map_type)
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
 {
 	__arch_remap_range(virt_addr, phys_addr, size, map_type);
+
+	if (map_type == MAP_UNCACHED)
+		dma_inv_range(virt_addr, size);
+
 	return 0;
 }
 
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 940e0e914c43..63e70963224a 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -191,6 +191,10 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsign
 		return -EINVAL;
 
 	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
+
+	if (flags == MAP_UNCACHED)
+		dma_inv_range(virt_addr, size);
+
 	return 0;
 }
 
-- 
2.39.2




^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached
  2023-05-26  6:33 ` [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached Ahmad Fatoum
@ 2023-05-26 12:49   ` Ahmad Fatoum
  0 siblings, 0 replies; 6+ messages in thread
From: Ahmad Fatoum @ 2023-05-26 12:49 UTC (permalink / raw)
  To: barebox; +Cc: Lucas Stach

On 26.05.23 08:33, Ahmad Fatoum wrote:
> memtest can call remap_range to map regions being tested as uncached,
> but remap_range did not take care to evict any stale cache lines.
> Do this now.
> 
> This fixes an issue of SELFTEST_MMU failing on an i.MX8MN, when running
> memtest on an uncached region that was previously memtested while being
> cached.
> 
> Fixes: 3100ea146688 ("ARM: rework MMU support")
> Fixes: 7cc98fbb6128 ("arm: cpu: add basic arm64 mmu support")
> Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
> ---
>  arch/arm/cpu/mmu_32.c | 4 ++++
>  arch/arm/cpu/mmu_64.c | 4 ++++
>  2 files changed, 8 insertions(+)
> 
> diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
> index a324ebf71a55..c4e5a3bb0ab2 100644
> --- a/arch/arm/cpu/mmu_32.c
> +++ b/arch/arm/cpu/mmu_32.c
> @@ -327,6 +327,10 @@ static void early_remap_range(u32 addr, size_t size, unsigned map_type)
>  int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned map_type)
>  {
>  	__arch_remap_range(virt_addr, phys_addr, size, map_type);
> +
> +	if (map_type == MAP_UNCACHED)
> +		dma_inv_range(virt_addr, size);
> +
>  	return 0;
>  }
>  
> diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
> index 940e0e914c43..63e70963224a 100644
> --- a/arch/arm/cpu/mmu_64.c
> +++ b/arch/arm/cpu/mmu_64.c
> @@ -191,6 +191,10 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsign
>  		return -EINVAL;
>  
>  	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
> +
> +	if (flags == MAP_UNCACHED)
> +		dma_inv_range(virt_addr, size);

Lucas mentioned that it may be surprising that a remap would result in data loss
and suggested to flush the range before remapping. This seems non-trivial,
because dma_flush_range (which works on virtual addresses) causes a DataAbort
when called on the null page or on an OP-TEE region:

DABT (current EL) exception (ESR 0x9600014a) at 0x000000007e000000
elr: 000000007dd96054 lr : 000000007dd95c74
x0 : 000000007e000000 x1 : 000000007fbfffff
x2 : 0000000000000040 x3 : 000000000000003f
x4 : 0000000000000020 x5 : 000000007dff7908
x6 : 0000000000000030 x7 : 000000007dff7a4a
x8 : 0000000000000000 x9 : 0000000000000000
x10: 0000000000000000 x11: 00000000fffffffd
x12: 00000000fffffffd x13: 0000000000000020
x14: 0000000000000000 x15: 0000000000000001
x16: 000000007dff7908 x17: 0000000000000001
x18: 000000007dff7e90 x19: 000000007e000000
x20: 0000000001c00000 x21: 0000000000000000
x22: 0040000000000600 x23: 000000007e000000
x24: 000000004002b360 x25: 000000007dfe0000
x26: 000000000008ae15 x27: 000000005f0f8de0
x28: 0000000000000000 x29: 000000007dff7e80

Call trace:
[<7dd96054>] (v8_flush_dcache_range+0x1c/0x34) from [<7dd95ce0>] (arch_remap_range+0x64/0xa8)
[<7dd95ce0>] (arch_remap_range+0x64/0xa8) from [<7dd95e04>] (__mmu_init+0xe0/0x10c)
[<7dd95e04>] (__mmu_init+0xe0/0x10c) from [<7dd94bc0>] (mmu_init+0x7c/0xa4)

The alternative would be flushing everything by set/way (perhaps too costly),
figuring out why flushing causes an abort, or starting to keep account of which
ranges are mapped how, so we don't need to iterate over all pages.

Given that existing users don't mind a remap deleting data:

  - either they manually flush beforehand like dma_alloc_coherent
  - or they always write before read (memtest, socfpga fncpy)
  - or they are legacy non-DT enabled framebuffer driver that don't zero-initialize
    their framebuffer anyway
  - or they aren't writable (HAB ROM)

I'd suggest we take the patch as-is because it fixes a bug and upstream
users are not affected by the side-effect.

Cheers,
Ahmad
  

> +
>  	return 0;
>  }
>  

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |




^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH master 1/4] ARM: mmu64: request TTB region
  2023-05-26  6:33 [PATCH master 1/4] ARM: mmu64: request TTB region Ahmad Fatoum
                   ` (2 preceding siblings ...)
  2023-05-26  6:33 ` [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached Ahmad Fatoum
@ 2023-05-30 10:26 ` Sascha Hauer
  3 siblings, 0 replies; 6+ messages in thread
From: Sascha Hauer @ 2023-05-30 10:26 UTC (permalink / raw)
  To: Ahmad Fatoum; +Cc: barebox

On Fri, May 26, 2023 at 08:33:51AM +0200, Ahmad Fatoum wrote:
> ARM64 MMU code used to disable early MMU, reallocate TTB from malloc
> area and then reenable it. This has recently been changed, so MMU is
> left enabled like on ARM32, but unlike ARM32, the SDRAM region used in
> PBL is not requested in barebox proper. Do that now.
> 
> Fixes: b53744ffe333 ("ARM: mmu64: Use two level pagetables in early code")
> Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
> ---
>  arch/arm/cpu/mmu_64.c | 12 ++++++++++++
>  1 file changed, 12 insertions(+)

Applied, thanks

Sascha

> 
> diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
> index cdc482542202..1d5a5355c6be 100644
> --- a/arch/arm/cpu/mmu_64.c
> +++ b/arch/arm/cpu/mmu_64.c
> @@ -192,8 +192,20 @@ static void mmu_enable(void)
>   */
>  void __mmu_init(bool mmu_on)
>  {
> +	uint64_t *ttb = get_ttb();
>  	struct memory_bank *bank;
>  
> +	if (!request_sdram_region("ttb", (unsigned long)ttb,
> +				  ARM_EARLY_PAGETABLE_SIZE))
> +		/*
> +		 * This can mean that:
> +		 * - the early MMU code has put the ttb into a place
> +		 *   which we don't have inside our available memory
> +		 * - Somebody else has occupied the ttb region which means
> +		 *   the ttb will get corrupted.
> +		 */
> +		pr_crit("Can't request SDRAM region for ttb at %p\n", ttb);
> +
>  	for_each_memory_bank(bank) {
>  		struct resource *rsv;
>  		resource_size_t pos;
> -- 
> 2.39.2
> 
> 
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |



^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2023-05-30 10:27 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-05-26  6:33 [PATCH master 1/4] ARM: mmu64: request TTB region Ahmad Fatoum
2023-05-26  6:33 ` [PATCH master 2/4] ARM: mmu64: define early_remap_range for mmu_early_enable usage Ahmad Fatoum
2023-05-26  6:33 ` [PATCH master 3/4] ARM: mmu32: " Ahmad Fatoum
2023-05-26  6:33 ` [PATCH master 4/4] ARM: mmu: invalidate when mapping range uncached Ahmad Fatoum
2023-05-26 12:49   ` Ahmad Fatoum
2023-05-30 10:26 ` [PATCH master 1/4] ARM: mmu64: request TTB region Sascha Hauer

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox