bsps/arm: do not disable MMU during translation table management operations.

Disabling the MMU requires complex cache flushing and invalidation
operations. There is almost no way to do that correctly on an SMP
system without stopping all other CPUs. On the other hand, the ARM
manual documents a sequence of operations which should be used
instead, and which guarantees distribution of the maintenance
operations to the other cores on the latest generation of Cortex-A
cores with the multiprocessing extensions.

This change could require the addition of an appropriate entry
to arm_cp15_start_mmu_config_table for some BSPs to ensure
that the MMU table stays accessible after the MMU is enabled:

  {
    .begin = (uint32_t) bsp_translation_table_base,
    .end = (uint32_t) bsp_translation_table_base + 0x4000,
    .flags = ARMV7_MMU_DATA_READ_WRITE_CACHED
  }

Updates #2782
Updates #2783
This commit is contained in:
Pavel Pisa
2016-07-17 18:21:48 +02:00
parent 0d77c4f281
commit ae3578a2c9
2 changed files with 59 additions and 9 deletions

View File

@@ -12,35 +12,69 @@
* http://www.rtems.org/license/LICENSE.
*/
#include <rtems.h>
#include <libcpu/arm-cp15.h>
/*
* Translation table modification requires to propagate
* information to memory and other cores.
*
* Algorithm follows example found in the section
*
* B3.10.1 General TLB maintenance requirements
* TLB maintenance operations and the memory order model
*
* of ARM Architecture Reference Manual
* ARMv7-A and ARMv7-R edition
* ARM DDI 0406C.b (ID072512)
*/
/*
 * Set the section entries for the address range [begin, end) to
 * @a section_flags without disabling the MMU.
 *
 * The modified entries are cleaned to the point of coherency and the
 * corresponding TLB entries are invalidated afterwards, following the
 * maintenance sequence referenced in the file header comment.
 *
 * @param begin Start of the virtual address range (section aligned down).
 * @param end End of the virtual address range (section aligned up).
 * @param section_flags New section attributes for each entry in the range.
 *
 * @return The previous flags of the first entry in the range.
 */
static uint32_t set_translation_table_entries(
  const void *begin,
  const void *end,
  uint32_t section_flags
)
{
  uint32_t *ttb = arm_cp15_get_translation_table_base();
  uint32_t istart = ARM_MMU_SECT_GET_INDEX(begin);
  uint32_t iend = ARM_MMU_SECT_GET_INDEX(ARM_MMU_SECT_MVA_ALIGN_UP(end));
  /* Section indices wrap modulo the number of entries in the table */
  uint32_t index_mask = (1U << (32 - ARM_MMU_SECT_BASE_SHIFT)) - 1U;
  uint32_t ctrl;
  uint32_t section_flags_of_first_entry;
  uint32_t i;
  void *first_ttb_addr;
  void *last_ttb_end;

  ctrl = arm_cp15_get_control();
  section_flags_of_first_entry = ttb [istart];
  last_ttb_end = first_ttb_addr = ttb + istart;

  /* Write the new section entries covering the requested range */
  for ( i = istart; i != iend; i = (i + 1U) & index_mask ) {
    uint32_t addr = i << ARM_MMU_SECT_BASE_SHIFT;

    ttb [i] = addr | section_flags;
    last_ttb_end = ttb + i + 1;
  }

  /*
   * If the data cache or the MMU is enabled, clean the modified entries
   * to memory so that a translation table walk cannot observe stale data.
   */
  if ( ctrl & (ARM_CP15_CTRL_C | ARM_CP15_CTRL_M ) ) {
    rtems_cache_flush_multiple_data_lines(first_ttb_addr,
        last_ttb_end - first_ttb_addr);
  }

  /* Ensure the entry updates are visible before the TLB maintenance */
  _ARM_Data_synchronization_barrier();

  /* Invalidate the TLB entries for each section in the range */
  for ( i = istart; i != iend; i = (i + 1U) & index_mask ) {
    void *mva = (void *) (i << ARM_MMU_SECT_BASE_SHIFT);

#if defined(__ARM_ARCH_7A__)
    arm_cp15_tlb_invalidate_entry_all_asids(mva);
#else
    arm_cp15_tlb_instruction_invalidate_entry(mva);
    arm_cp15_tlb_data_invalidate_entry(mva);
#endif
  }

  /* Complete the TLB maintenance before any use of the new mapping */
  _ARM_Data_synchronization_barrier();
  _ARM_Instruction_synchronization_barrier();

  return section_flags_of_first_entry;
}

View File

@@ -559,6 +559,22 @@ arm_cp15_tlb_invalidate_entry(const void *mva)
);
}
/*
 * Invalidate the unified TLB entry for the given modified virtual
 * address (MVA) in all address spaces (CP15 c8, c7, 3 — TLBIMVAA per
 * the ARMv7-A architecture; NOTE(review): requires the multiprocessing
 * extensions — confirm availability on the targeted cores).
 */
ARM_CP15_TEXT_SECTION static inline void
arm_cp15_tlb_invalidate_entry_all_asids(const void *mva)
{
  ARM_SWITCH_REGISTERS;

  /* Strip the bits of the address that are not part of the MVA */
  mva = ARM_CP15_TLB_PREPARE_MVA(mva);

  __asm__ volatile (
    ARM_SWITCH_TO_ARM
    "mcr p15, 0, %[mva], c8, c7, 3\n"
    ARM_SWITCH_BACK
    : ARM_SWITCH_OUTPUT
    : [mva] "r" (mva)
  );
}
ARM_CP15_TEXT_SECTION static inline void
arm_cp15_tlb_instruction_invalidate(void)
{