Skip to content

Commit 6c6f1a5

Browse files
Nicolas Pitre authored and cfriedt committed
arch: arm64: mmu: revert useless cache handling
This reverts the following commits: commit c9b534c ("arch: arm64: mmu: avoid using of set/way cache instructions") commit c4ffadb ("arch: arm64: avoid invalidating of RO mem after mem map") The reason for the former is about Xen not virtualizing set/way cache operations used by sys_cache_data_invd_all() originally used prior to enabling the MMU and data cache. But the cure is worse than the Xen issue as: - Cache invalidation is performed on _every_ mapping change. - Those invalidations are completely unnecessary with a PIPT data cache. ARM64 implementations use Physically Indexed, Physically Tagged (PIPT) data caches where cache maintenance is not needed during MMU operations. - arch_mem_map() invoked with K_MEM_MAP_UNPAGED triggers page faults when accessing the unmapped region for cache operations. The page fault handler in do_page_fault() tries to reacquire z_mm_lock which is already held by the caller of arch_mem_map(). This results in a deadlock. And the latter commit disables cache operations for read-only mappings, effectively rendering the workaround described in the first commit inoperative on half the mappings, making the performance cost of the first commit's approach unjustifiable since it doesn't actually solve the problem it set out to fix. Given the above, the actual "fix" should simply have been the removal of the sys_cache_data_invd_all() as, in theory, it isn't strictly needed and its replacement is already ineffective on read-only areas as mentioned. So let's revert them, which fixes the deadlock-induced CI test failures on ARM FVP SMP configurations that were triggered when demand paging or memory mapping operations were involved. Signed-off-by: Nicolas Pitre <[email protected]>
1 parent 5b43674 commit 6c6f1a5

File tree

1 file changed

+0
-42
lines changed

1 file changed

+0
-42
lines changed

arch/arm64/core/mmu.c

Lines changed: 0 additions & 42 deletions
Original file line number | Diff line number | Diff line change
@@ -870,21 +870,6 @@ static inline void add_arm_mmu_region(struct arm_mmu_ptables *ptables,
870870
}
871871
}
872872

873-
static inline void inv_dcache_after_map_helper(void *virt, size_t size, uint32_t attrs)
874-
{
875-
/*
876-
* DC IVAC instruction requires write access permission to the VA,
877-
* otherwise it can generate a permission fault
878-
*/
879-
if ((attrs & MT_RW) != MT_RW) {
880-
return;
881-
}
882-
883-
if (MT_TYPE(attrs) == MT_NORMAL || MT_TYPE(attrs) == MT_NORMAL_WT) {
884-
sys_cache_data_invd_range(virt, size);
885-
}
886-
}
887-
888873
static void setup_page_tables(struct arm_mmu_ptables *ptables)
889874
{
890875
unsigned int index;
@@ -924,20 +909,6 @@ static void setup_page_tables(struct arm_mmu_ptables *ptables)
924909
}
925910

926911
invalidate_tlb_all();
927-
928-
for (index = 0U; index < ARRAY_SIZE(mmu_zephyr_ranges); index++) {
929-
size_t size;
930-
931-
range = &mmu_zephyr_ranges[index];
932-
size = POINTER_TO_UINT(range->end) - POINTER_TO_UINT(range->start);
933-
inv_dcache_after_map_helper(range->start, size, range->attrs);
934-
}
935-
936-
for (index = 0U; index < mmu_config.num_regions; index++) {
937-
region = &mmu_config.mmu_regions[index];
938-
inv_dcache_after_map_helper(UINT_TO_POINTER(region->base_va), region->size,
939-
region->attrs);
940-
}
941912
}
942913

943914
/* Translation table control register settings */
@@ -1132,20 +1103,8 @@ void arch_mem_map(void *virt, uintptr_t phys, size_t size, uint32_t flags)
11321103
LOG_ERR("__arch_mem_map() returned %d", ret);
11331104
k_panic();
11341105
} else {
1135-
uint32_t mem_flags = flags & K_MEM_CACHE_MASK;
1136-
11371106
sync_domains((uintptr_t)virt, size, "mem_map");
11381107
invalidate_tlb_all();
1139-
1140-
switch (mem_flags) {
1141-
case K_MEM_CACHE_WB:
1142-
case K_MEM_CACHE_WT:
1143-
mem_flags = (mem_flags == K_MEM_CACHE_WB) ? MT_NORMAL : MT_NORMAL_WT;
1144-
mem_flags |= (flags & K_MEM_PERM_RW) ? MT_RW : 0;
1145-
inv_dcache_after_map_helper(virt, size, mem_flags);
1146-
default:
1147-
break;
1148-
}
11491108
}
11501109
}
11511110

@@ -1261,7 +1220,6 @@ static int private_map(struct arm_mmu_ptables *ptables, const char *name,
12611220
__ASSERT(ret == 0, "add_map() returned %d", ret);
12621221
invalidate_tlb_all();
12631222

1264-
inv_dcache_after_map_helper(UINT_TO_POINTER(virt), size, attrs);
12651223
return ret;
12661224
}
12671225

0 commit comments

Comments (0)