Mirror of https://gitlab.rtems.org/rtems/rtos/rtems.git, synced 2025-12-17 05:15:24 +08:00
bsps/arm: Allow parallel start of processors
Do not wait for the secondary processor in the individual _CPU_SMP_Start_processor() calls. Instead, wait for all of them in _CPU_SMP_Finalize_initialization() before the L2 cache is enabled.
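In outline, the change moves the per-processor wait out of the start hook and into the finalize hook, so the start hook only kicks the processor and returns. The sketch below condenses that flow using the same primitives that appear in the hunks; bsp_kick_secondary_processor() is a hypothetical stand-in for the BSP-specific start code (ROM start-address write and reset release, or kick-address write), and the cache-enable preprocessor guard from the real file is omitted.

#include <rtems/score/smpimpl.h>
#include <rtems/score/assert.h>
#include <rtems/rtems/cache.h>

/* Hypothetical placeholder for the BSP-specific code that releases the
 * secondary processor and points it at _start; left empty in this sketch. */
static void bsp_kick_secondary_processor(uint32_t cpu_index)
{
  (void) cpu_index;
}

bool _CPU_SMP_Start_processor(uint32_t cpu_index)
{
  _Assert(cpu_index == 1);

  /* Kick the secondary processor and return immediately instead of
   * waiting for it here, so that all processors can start in parallel. */
  bsp_kick_secondary_processor(cpu_index);

  return true;
}

void _CPU_SMP_Finalize_initialization(uint32_t cpu_count)
{
  (void) cpu_count;

  /* Wait until every started processor is ready to begin multitasking,
   * then enable the unified L2 cache. */
  _SMP_Wait_for_ready_to_start_multitasking();
  rtems_cache_enable_data();
}

Because the start hook no longer blocks, the secondary processors can be released back to back, and the single barrier in the finalize hook ensures they have all completed their basic initialization before the L2 cache is turned on.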
@@ -31,9 +31,10 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rtems/score/smpimpl.h>
+#include <rtems/score/cpu.h>
 
 #include <bsp/start.h>
+#include <rtems/score/assert.h>
 
 #include <bsp/socal/alt_rstmgr.h>
 #include <bsp/socal/alt_sysmgr.h>
@@ -42,27 +43,17 @@
 bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 {
-  bool started;
+  _Assert(cpu_index == 1);
 
-  if (cpu_index == 1) {
-    alt_write_word(
-      ALT_SYSMGR_ROMCODE_ADDR + ALT_SYSMGR_ROMCODE_CPU1STARTADDR_OFST,
-      ALT_SYSMGR_ROMCODE_CPU1STARTADDR_VALUE_SET((uint32_t) _start)
-    );
+  alt_write_word(
+    ALT_SYSMGR_ROMCODE_ADDR + ALT_SYSMGR_ROMCODE_CPU1STARTADDR_OFST,
+    ALT_SYSMGR_ROMCODE_CPU1STARTADDR_VALUE_SET((uint32_t) _start)
+  );
 
-    alt_clrbits_word(
-      ALT_RSTMGR_MPUMODRST_ADDR,
-      ALT_RSTMGR_MPUMODRST_CPU1_SET_MSK
-    );
+  alt_clrbits_word(
+    ALT_RSTMGR_MPUMODRST_ADDR,
+    ALT_RSTMGR_MPUMODRST_CPU1_SET_MSK
+  );
 
-    /*
-     * Wait for secondary processor to complete its basic initialization so
-     * that we can enable the unified L2 cache.
-     */
-    started = _Per_CPU_State_wait_for_non_initial_state(cpu_index, 0);
-  } else {
-    started = false;
-  }
-
-  return started;
+  return true;
 }
@@ -60,7 +60,11 @@ void _CPU_SMP_Finalize_initialization(uint32_t cpu_count)
   _Assert_Unused_variable_equals(sc, RTEMS_SUCCESSFUL);
 
 #if defined(BSP_DATA_CACHE_ENABLED) || defined(BSP_INSTRUCTION_CACHE_ENABLED)
-  /* Enable unified L2 cache */
+  /*
+   * When all secondary processors are ready to start multitasking, enable the
+   * unified L2 cache.
+   */
+  _SMP_Wait_for_ready_to_start_multitasking();
   rtems_cache_enable_data();
 #endif
 }
@@ -25,28 +25,26 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rtems/score/smpimpl.h>
+#include <rtems/score/cpu.h>
 
 #include <bsp/start.h>
+#include <rtems/score/assert.h>
 
 bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 {
+  volatile uint32_t* const kick_address = (uint32_t*) 0xfffffff0UL;
+
+  _Assert(cpu_index == 1);
+
   /*
    * Enable the second CPU.
    */
-  if (cpu_index != 0) {
-    volatile uint32_t* const kick_address = (uint32_t*) 0xfffffff0UL;
-    _ARM_Data_synchronization_barrier();
-    _ARM_Instruction_synchronization_barrier();
-    *kick_address = (uint32_t) _start;
-    _ARM_Data_synchronization_barrier();
-    _ARM_Instruction_synchronization_barrier();
-    _ARM_Send_event();
-  }
+  _ARM_Data_synchronization_barrier();
+  _ARM_Instruction_synchronization_barrier();
+  *kick_address = (uint32_t) _start;
+  _ARM_Data_synchronization_barrier();
+  _ARM_Instruction_synchronization_barrier();
+  _ARM_Send_event();
 
-  /*
-   * Wait for secondary processor to complete its basic initialization so that
-   * we can enable the unified L2 cache.
-   */
-  return _Per_CPU_State_wait_for_non_initial_state(cpu_index, 0);
+  return true;
 }
@@ -30,28 +30,26 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include <rtems/score/smpimpl.h>
+#include <rtems/score/cpu.h>
 
 #include <bsp/start.h>
+#include <rtems/score/assert.h>
 
 bool _CPU_SMP_Start_processor(uint32_t cpu_index)
 {
+  volatile uint32_t* const kick_address = (uint32_t*) 0xfffffff0UL;
+
+  _Assert(cpu_index == 1);
+
   /*
    * Enable the second CPU.
    */
-  if (cpu_index != 0) {
-    volatile uint32_t* const kick_address = (uint32_t*) 0xfffffff0UL;
-    _ARM_Data_synchronization_barrier();
-    _ARM_Instruction_synchronization_barrier();
-    *kick_address = (uint32_t) _start;
-    _ARM_Data_synchronization_barrier();
-    _ARM_Instruction_synchronization_barrier();
-    _ARM_Send_event();
-  }
+  _ARM_Data_synchronization_barrier();
+  _ARM_Instruction_synchronization_barrier();
+  *kick_address = (uint32_t) _start;
+  _ARM_Data_synchronization_barrier();
+  _ARM_Instruction_synchronization_barrier();
+  _ARM_Send_event();
 
-  /*
-   * Wait for secondary processor to complete its basic initialization so that
-   * we can enable the unified L2 cache.
-   */
-  return _Per_CPU_State_wait_for_non_initial_state(cpu_index, 0);
+  return true;
 }