Files
rt-thread/libcpu/aarch64/common/setup.c
Bernard Xiong 743b614875
Some checks failed
RT-Thread BSP Static Build Check / 🔍 Summary of Git Diff Changes (push) Has been cancelled
RT-Thread BSP Static Build Check / ${{ matrix.legs.RTT_BSP }} (push) Has been cancelled
RT-Thread BSP Static Build Check / collect-artifacts (push) Has been cancelled
doc_doxygen / doxygen_doc generate (push) Has been cancelled
doc_doxygen / deploy (push) Has been cancelled
pkgs_test / change (push) Has been cancelled
utest_auto_run / A9 :components/dfs.cfg (push) Has been cancelled
utest_auto_run / A9 :components/lwip.cfg (push) Has been cancelled
utest_auto_run / A9 :components/netdev.cfg (push) Has been cancelled
utest_auto_run / A9 :components/sal.cfg (push) Has been cancelled
utest_auto_run / A9 :cpp11/cpp11.cfg (push) Has been cancelled
utest_auto_run / AARCH64-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / A9-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / RISCV-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / XUANTIE-rtsmart :default.cfg (push) Has been cancelled
utest_auto_run / AARCH64 :default.cfg (push) Has been cancelled
utest_auto_run / AARCH64-smp :default.cfg (push) Has been cancelled
utest_auto_run / A9 :default.cfg (push) Has been cancelled
utest_auto_run / A9-smp :default.cfg (push) Has been cancelled
utest_auto_run / RISCV :default.cfg (push) Has been cancelled
utest_auto_run / RISCV-smp :default.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/atomic_c11.cfg (push) Has been cancelled
utest_auto_run / RISCV :kernel/atomic_c11.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/ipc.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/kernel_basic.cfg (push) Has been cancelled
utest_auto_run / A9 :kernel/mem.cfg (push) Has been cancelled
ToolsCI / Tools (push) Has been cancelled
Weekly CI Scheduler / Trigger and Monitor CIs (push) Has been cancelled
Weekly CI Scheduler / Create Discussion Report (push) Has been cancelled
[components][clock_time] Refactor time subsystem around clock_time (#11111)
* [components][clock_time] Refactor time subsystem around clock_time

Introduce the clock_time core with clock source/event separation, high-resolution scheduling, and boot-time helpers, plus clock_timer adapters for timer peripherals.

Remove legacy ktime/cputime/hwtimer implementations and migrate arch and BSP time paths to the new subsystem while keeping POSIX time integration functional.

Update drivers, Kconfig/SConscript wiring, documentation, and tests; add clock_time overview docs and align naming to clock_boottime/clock_hrtimer/clock_timer.

* [components][clock_time] Use BSP-provided clock timer frequency on riscv64

* [risc-v] Use runtime clock timer frequency for tick and delays

* [bsp] Add clock timer frequency hooks for riscv64 boards

* [bsp] Update Renesas RA driver doc clock_timer link

* [bsp] Sync zynqmp-r5-axu4ev rtconfig after config refresh

* [bsp][rk3500] Update rk3500 clock configuration

* [bsp][hpmicro] Add rt_hw_us_delay hook and update board delays

* [bsp][stm32l496-st-nucleo] enable clock_time for hwtimer sample in ci

* [bsp][hpmicro] Fix rtconfig include scope for hpm6750evk

Move rtconfig.h include outside the ENET_MULTIPLE_PORT guard for hpm6750evk and hpm6750evk2 so configuration macros are available regardless of ENET settings.

* [bsp][raspi3] select clock time for systimer

* [bsp][hpm5300evk] Trim trailing blank line

* [bsp][hpm5301evklite] Trim trailing blank line

* [bsp][hpm5e00evk] Trim trailing blank line

* [bsp][hpm6200evk] Trim trailing blank line

* [bsp][hpm6300evk] Trim trailing blank line

* [bsp][hpm6750evk] Trim trailing blank line

* [bsp][hpm6750evk2] Trim trailing blank line

* [bsp][hpm6750evkmini] Trim trailing blank line

* [bsp][hpm6800evk] Trim trailing blank line

* [bsp][hpm6e00evk] Trim trailing blank line

* [bsp][nxp] switch lpc178x to gcc and remove mcx timer source

* [bsp][stm32] fix the CONFIG_RT_USING_CLOCK_TIME issue.

* [docs][clock_time] add clock time documentation

* [docs][clock_time] Update clock time subsystem documentation

- Update device driver index to use correct page reference
- Clarify upper layer responsibilities in architecture overview
- Update README to describe POSIX/libc, Soft RTC, and device driver usage
- Refine architecture diagram with improved layout and color scheme
- Remove obsolete clock_timer.md file

* [kernel][utest] Trim trailing space

* [clock_time] Fix hrtimer wrap handling

* [clock_time] fix the static rt_inline issue

* [clock_time] fix the rt_clock_hrtimer_control result issue
2026-01-31 17:44:27 +08:00

514 lines
12 KiB
C

/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-02-21 GuEe-GUI first version
*/
#include <rtthread.h>
#define DBG_TAG "cpu.aa64"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <smp_call.h>
#include <cpu.h>
#include <mmu.h>
#include <cpuport.h>
#include <interrupt.h>
#include <gtimer.h>
#include <setup.h>
#include <stdlib.h>
#include <ioremap.h>
#include <rtdevice.h>
#include <gic.h>
#include <gicv3.h>
#include <mm_memblock.h>
#include <dt-bindings/size.h>
/* Kernel image boundaries provided by the linker script (physical layout). */
extern rt_ubase_t _start, _end;
/* Assembly entry point that released secondary cores jump to. */
extern void _secondary_cpu_entry(void);
/* Device-tree blob linked into the image when RT_USING_BUILTIN_FDT is set. */
extern void rt_hw_builtin_fdt();
/* Boot-time MMU translation table set up by the startup code. */
extern size_t MMUTable[];
/* Exception vector table installed via system_vectors_init(). */
extern void *system_vectors;

/* Location and size of the FDT handed over by the bootloader; filled in by
 * rt_hw_fdt_install_early() and consumed by rt_hw_common_setup(). */
static void *fdt_ptr = RT_NULL;
static rt_size_t fdt_size = 0;

#ifdef RT_USING_SMP
extern struct cpu_ops_t cpu_psci_ops;
extern struct cpu_ops_t cpu_spin_table_ops;
#else
extern int rt_hw_cpu_id(void);
#endif

/* MPIDR (affinity) value of each CPU, indexed by logical CPU id.  The
 * designated initializer reserves one extra trailing slot that stays zero. */
rt_uint64_t rt_cpu_mpidr_table[] =
{
    [RT_CPUS_NR] = 0,
};

/* Registered mechanisms for starting secondary cores (PSCI, spin-table). */
static struct cpu_ops_t *cpu_ops[] =
{
#ifdef RT_USING_SMP
    &cpu_psci_ops,
    &cpu_spin_table_ops,
#endif
};

/* Device-tree node of each CPU, indexed by logical CPU id. */
static struct rt_ofw_node *cpu_np[RT_CPUS_NR] = { };
/**
 * Record the device-tree blob handed over by the bootloader, before the
 * memory subsystem is initialized.
 *
 * The pointer is only accepted when the blob carries a valid FDT header;
 * with a builtin FDT the argument is ignored entirely.
 */
void rt_hw_fdt_install_early(void *fdt)
{
#ifndef RT_USING_BUILTIN_FDT
    if (fdt == RT_NULL)
    {
        return;
    }

    if (fdt_check_header(fdt) == 0)
    {
        fdt_ptr = fdt;
        fdt_size = fdt_totalsize(fdt);
    }
#else
    (void)fdt;
#endif
}
#ifdef RT_USING_CLOCK_TIME
static rt_ubase_t loops_per_tick[RT_CPUS_NR];
/* Read the ARM generic timer physical counter (CNTPCT_EL0). */
static rt_ubase_t cpu_get_cycles(void)
{
    rt_ubase_t cnt;

    rt_hw_sysreg_read(cntpct_el0, cnt);

    return cnt;
}
/*
 * Calibrate the busy-wait loop for the current CPU.
 *
 * Counts how many increment iterations fit into one OS tick worth of
 * generic-timer cycles (CNTFRQ_EL0 / RT_TICK_PER_SECOND) and stores the
 * result in loops_per_tick[] for cpu_us_delay().  Two passes are timed:
 * the first includes an extra nop per iteration, so the difference between
 * the two counts cancels part of the loop overhead.
 */
static void cpu_loops_per_tick_init(void)
{
    rt_ubase_t offset;
    volatile rt_ubase_t freq, step, cycles_end1, cycles_end2;
    volatile rt_uint32_t cycles_count1 = 0, cycles_count2 = 0;

    rt_hw_sysreg_read(cntfrq_el0, freq);
    step = freq / RT_TICK_PER_SECOND;

    cycles_end1 = cpu_get_cycles() + step;
    while (cpu_get_cycles() < cycles_end1)
    {
        __asm__ volatile ("nop");
        /* "+r" (read-write), not "=r": the add reads the previous counter
         * value, so a write-only constraint leaves the source register
         * indeterminate and the count would be garbage. */
        __asm__ volatile ("add %0, %0, #1":"+r"(cycles_count1));
    }

    cycles_end2 = cpu_get_cycles() + step;
    while (cpu_get_cycles() < cycles_end2)
    {
        __asm__ volatile ("add %0, %0, #1":"+r"(cycles_count2));
    }

    if ((rt_int32_t)(cycles_count2 - cycles_count1) > 0)
    {
        offset = cycles_count2 - cycles_count1;
    }
    else
    {
        /* Impossible, but prepared for any eventualities */
        offset = cycles_count2 / 4;
    }

    loops_per_tick[rt_hw_cpu_id()] = offset;
}
/*
 * Busy-wait for roughly @us microseconds using the calibrated budget in
 * loops_per_tick[].
 *
 * 0x10c7 == 4295 ~= 2^32 / 1e6, so (us * 0x10c7 * per_second) >> 32
 * approximates us * per_second / 1000000 without a 64-bit division.
 *
 * NOTE(review): loops_per_tick[] is calibrated in busy-loop iterations per
 * tick, but the wait condition below compares generic-timer counter deltas
 * from cpu_get_cycles() -- confirm the two units are intended to be
 * interchangeable here.
 */
static void cpu_us_delay(rt_uint32_t us)
{
    volatile rt_base_t start = cpu_get_cycles(), cycles;

    cycles = ((us * 0x10c7UL) * loops_per_tick[rt_hw_cpu_id()] * RT_TICK_PER_SECOND) >> 32;

    while ((cpu_get_cycles() - start) < cycles)
    {
        rt_hw_cpu_relax();
    }
}
#endif /* RT_USING_CLOCK_TIME */
/* Default idle hook: halt the core until the next interrupt arrives.
 * Declared rt_weak so a BSP can substitute its own low-power routine. */
rt_weak void rt_hw_idle_wfi(void)
{
    __asm__ volatile ("wfi");
}
/* Point the current exception level's vector base register at the kernel
 * exception vector table. */
static void system_vectors_init(void)
{
    rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
}
/*
 * Walk every CPU node in the device tree, record its OFW node and MPIDR
 * affinity in cpu_np[] / rt_cpu_mpidr_table[], and give each registered
 * cpu_ops_t a chance to initialize the core.  When the clock_time
 * subsystem is enabled, also calibrates the boot CPU's busy-wait delay.
 */
rt_inline void cpu_info_init(void)
{
    int i = 0;
    rt_uint64_t mpidr;
    struct rt_ofw_node *np;

    /* get boot cpu info */
    rt_hw_sysreg_read(mpidr_el1, mpidr);

    rt_ofw_foreach_cpu_node(np)
    {
        rt_uint64_t hwid = rt_ofw_get_cpu_hwid(np, 0);

        if ((mpidr & MPIDR_AFFINITY_MASK) != hwid)
        {
            /* Not the boot CPU: keep only the affinity bits and set a
             * marker bit (bit 31, reserved in MPIDR) so the SMP bring-up
             * code can tell this slot has not booted yet. */
            hwid |= 1ULL << 31;
        }
        else
        {
            /* Boot CPU: store the full MPIDR value as read. */
            hwid = mpidr;
        }

        cpu_np[i] = np;
        rt_cpu_mpidr_table[i] = hwid;

        for (int idx = 0; idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
        {
            struct cpu_ops_t *ops = cpu_ops[idx];

            if (ops->cpu_init)
            {
                ops->cpu_init(i, np);
            }
        }

        if (++i >= RT_CPUS_NR)
        {
            break;
        }
    }

    /* Secondary cores consult this table before their caches are coherent
     * with ours, so flush it out to memory now. */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, rt_cpu_mpidr_table, sizeof(rt_cpu_mpidr_table));

#if defined(RT_USING_CLOCK_TIME) && defined(RT_USING_DM)
    cpu_loops_per_tick_init();

    /* Install the calibrated busy-wait only when no BSP driver has
     * already provided a microsecond delay hook. */
    if (!rt_clock_timer_us_delay)
    {
        rt_clock_timer_us_delay = &cpu_us_delay;
    }
#endif /* RT_USING_CLOCK_TIME && RT_USING_DM */
}
/**
 * Parse a size string such as "8M", "512K" or "1G" from the bootargs.
 *
 * @param string text to parse: a decimal number followed by a unit suffix
 *               (K/M/G, case-insensitive)
 * @param who    name of the bootargs option, used in the warning message
 *
 * @return the size in bytes; when the unit suffix is missing or unknown a
 *         warning is logged and the unscaled number is returned
 */
rt_inline rt_size_t string_to_size(const char *string, const char *who)
{
    char unit;
    rt_size_t size;
    const char *cp = string;

    /* strtoul() converts the number and leaves cp at the first non-digit
     * in one pass; unlike atoi() it also tolerates leading whitespace
     * consistently and does not truncate large values to int. */
    size = (rt_size_t)strtoul(cp, (char **)&cp, 10);

    switch (*cp)
    {
    case 'k': case 'K':
        size *= SIZE_KB;
        break;
    case 'm': case 'M':
        size *= SIZE_MB;
        break;
    case 'g': case 'G':
        size *= SIZE_GB;
        break;
    default:
        /* '& 0x5f' folds a lowercase letter to uppercase for the
         * diagnostic, matching the accepted suffix spelling. */
        unit = *cp & '_';
        LOG_W("Unknown unit of '%c' in `%s`", unit, who);
        break;
    }

    return size;
}
/*
 * Common AArch64 boot-time setup, called once on the boot CPU.
 *
 * Carves the early physical memory layout (kernel image, heap, initial
 * page pool, relocated FDT), installs the kernel MMU mappings, parses the
 * device tree (memory, initrd, stdout, optional DMA pool sizes from the
 * bootargs), and finally brings up interrupts, the tick timer, board
 * components and the SMP IPI handlers.
 */
void rt_hw_common_setup(void)
{
    rt_uint64_t initrd_ranges[3];
    rt_size_t kernel_start, kernel_end;
    rt_size_t heap_start, heap_end;
    rt_size_t init_page_start, init_page_end;
    rt_size_t fdt_start, fdt_end;
    rt_region_t init_page_region = { 0 };
    rt_region_t platform_mem_region = { 0 };
    static struct mem_desc platform_mem_desc;
    const rt_ubase_t pv_off = PV_OFFSET;

    system_vectors_init();

#ifdef RT_USING_SMART
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffffff00000000, 0x20000000, MMUTable, pv_off);
#else
    rt_hw_mmu_map_init(&rt_kernel_space, (void*)0xffffd0000000, 0x20000000, MMUTable, 0);
#endif

    /* Physical layout, page-aligned: [kernel][heap][init pages][fdt copy] */
    kernel_start = RT_ALIGN_DOWN((rt_size_t)rt_kmem_v2p((void *)&_start) - 64, ARCH_PAGE_SIZE);
    kernel_end = RT_ALIGN((rt_size_t)rt_kmem_v2p((void *)&_end), ARCH_PAGE_SIZE);
    heap_start = kernel_end;
    heap_end = RT_ALIGN(heap_start + ARCH_HEAP_SIZE, ARCH_PAGE_SIZE);
    init_page_start = heap_end;
    init_page_end = RT_ALIGN(init_page_start + ARCH_INIT_PAGE_SIZE, ARCH_PAGE_SIZE);
    fdt_start = init_page_end;
    fdt_end = RT_ALIGN(fdt_start + fdt_size, ARCH_PAGE_SIZE);

    platform_mem_region.start = kernel_start;
#ifndef RT_USING_BUILTIN_FDT
    platform_mem_region.end = fdt_end;
#else
    /* A builtin FDT lives inside the kernel image; no relocation slot. */
    platform_mem_region.end = init_page_end;
    (void)fdt_start;
    (void)fdt_end;
#endif

    rt_memblock_reserve_memory("kernel", kernel_start, kernel_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("memheap", heap_start, heap_end, MEMBLOCK_NONE);
    rt_memblock_reserve_memory("init-page", init_page_start, init_page_end, MEMBLOCK_NONE);
#ifndef RT_USING_BUILTIN_FDT
    rt_memblock_reserve_memory("fdt", fdt_start, fdt_end, MEMBLOCK_NONE);

    /* To virtual address */
    fdt_ptr = (void *)(fdt_ptr - pv_off);

#ifdef KERNEL_VADDR_START
    /* The blob may sit outside the early linear mapping; remap it first. */
    if ((rt_ubase_t)fdt_ptr + fdt_size - KERNEL_VADDR_START > ARCH_EARLY_MAP_SIZE)
    {
        fdt_ptr = rt_ioremap_early(fdt_ptr + pv_off, fdt_size);

        RT_ASSERT(fdt_ptr != RT_NULL);
    }
#endif /* KERNEL_VADDR_START */

    /* Copy the FDT into its reserved slot so it survives page-pool init. */
    rt_memmove((void *)(fdt_start - pv_off), fdt_ptr, fdt_size);
    fdt_ptr = (void *)fdt_start - pv_off;
#else
    fdt_ptr = &rt_hw_builtin_fdt;
    fdt_size = fdt_totalsize(fdt_ptr);
#endif /* RT_USING_BUILTIN_FDT */

    rt_system_heap_init((void *)(heap_start - pv_off), (void *)(heap_end - pv_off));

    init_page_region.start = init_page_start - pv_off;
    init_page_region.end = init_page_end - pv_off;
    rt_page_init(init_page_region);

    /* create MMU mapping of kernel memory */
    platform_mem_region.start = RT_ALIGN_DOWN(platform_mem_region.start, ARCH_PAGE_SIZE);
    platform_mem_region.end = RT_ALIGN(platform_mem_region.end, ARCH_PAGE_SIZE);

    platform_mem_desc.paddr_start = platform_mem_region.start;
    platform_mem_desc.vaddr_start = platform_mem_region.start - pv_off;
    platform_mem_desc.vaddr_end = platform_mem_region.end - pv_off - 1;
    platform_mem_desc.attr = NORMAL_MEM;

    rt_hw_mmu_setup(&rt_kernel_space, &platform_mem_desc, 1);

    if (rt_fdt_prefetch(fdt_ptr))
    {
        /* Platform cannot be initialized */
        RT_ASSERT(0);
    }

    rt_fdt_scan_chosen_stdout();

    rt_fdt_scan_initrd(initrd_ranges);

    rt_fdt_scan_memory();

#ifdef RT_USING_DMA
    do {
        const char *bootargs;
        rt_ubase_t dma_pool_base;
        rt_size_t cma_size = 0, coherent_pool_size = 0;

        if (!rt_fdt_bootargs_select("cma=", 0, &bootargs))
        {
            cma_size = string_to_size(bootargs, "cma");
        }

        if (!rt_fdt_bootargs_select("coherent_pool=", 0, &bootargs))
        {
            coherent_pool_size = string_to_size(bootargs, "coherent-pool");
        }

        /* The CMA pool must be strictly larger than the coherent pool;
         * otherwise fall back to the built-in defaults. */
        if (cma_size <= coherent_pool_size)
        {
            if (cma_size || coherent_pool_size)
            {
                /* Message matches the condition above ("<=", not ">"). */
                LOG_W("DMA pool %s=%u <= %s=%u",
                        "CMA", cma_size, "coherent-pool", coherent_pool_size);
            }

            cma_size = 8 * SIZE_MB;
            coherent_pool_size = 2 * SIZE_MB;
        }

        dma_pool_base = platform_mem_region.end;
        rt_memblock_reserve_memory("dma-pool",
                dma_pool_base, dma_pool_base + cma_size + coherent_pool_size, MEMBLOCK_NONE);

        if (rt_dma_pool_extract(cma_size, coherent_pool_size))
        {
            LOG_E("Alloc DMA pool %s=%u, %s=%u fail",
                    "CMA", cma_size, "coherent-pool", coherent_pool_size);
        }
    } while (0);
#endif /* RT_USING_DMA */

    rt_memblock_setup_memory_environment();

    rt_fdt_earlycon_kick(FDT_EARLYCON_KICK_UPDATE);

    rt_fdt_unflatten();

    cpu_info_init();

#ifdef RT_USING_PIC
    rt_pic_init();
    rt_pic_irq_init();
#else
    /* initialize hardware interrupt */
    rt_hw_interrupt_init();

    /* initialize uart */
    rt_hw_uart_init();
#endif

#ifndef RT_CLOCK_TIME_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_init();
#endif /* !RT_CLOCK_TIME_ARM_ARCH */

#ifdef RT_USING_COMPONENTS_INIT
    rt_components_board_init();
#endif

#if defined(RT_USING_CONSOLE) && defined(RT_USING_DEVICE)
    rt_ofw_console_setup();
#endif

    rt_thread_idle_sethook(rt_hw_idle_wfi);

#ifdef RT_USING_SMP
    rt_smp_call_init();
    /* Install the IPI handle */
    rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_STOP_IPI, rt_scheduler_ipi_handler);
    rt_hw_ipi_handler_install(RT_SMP_CALL_IPI, rt_smp_call_ipi_handler);
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);
#endif
}
#ifdef RT_USING_SMP
rt_weak void rt_hw_secondary_cpu_up(void)
{
int cpu_id = rt_hw_cpu_id();
rt_uint64_t entry = (rt_uint64_t)rt_kmem_v2p(_secondary_cpu_entry);
if (!entry)
{
LOG_E("Failed to translate '_secondary_cpu_entry' to physical address");
RT_ASSERT(0);
}
/* Maybe we are no in the first cpu */
for (int i = 0; i < RT_ARRAY_SIZE(cpu_np); ++i)
{
int err;
const char *enable_method;
if (!cpu_np[i] || i == cpu_id)
{
continue;
}
err = rt_ofw_prop_read_string(cpu_np[i], "enable-method", &enable_method);
for (int idx = 0; !err && idx < RT_ARRAY_SIZE(cpu_ops); ++idx)
{
struct cpu_ops_t *ops = cpu_ops[idx];
if (ops->method && !rt_strcmp(ops->method, enable_method) && ops->cpu_boot)
{
err = ops->cpu_boot(i, entry);
break;
}
}
if (err)
{
LOG_W("Call cpu %d on %s", i, "failed");
}
}
}
/*
 * C entry point executed ON a secondary core after it has been released by
 * rt_hw_secondary_cpu_up().  Installs the exception vectors, switches to
 * the shared kernel page table, brings up the local interrupt controller
 * and tick timer, then enters the scheduler (does not return).
 *
 * The statement order below is part of the bring-up contract; do not
 * reorder casually.
 */
rt_weak void rt_hw_secondary_cpu_bsp_start(void)
{
    int cpu_id = rt_hw_cpu_id();

    system_vectors_init();

    /* Take the global cpus lock before touching shared scheduler state. */
    rt_hw_spin_lock(&_cpus_lock);

    /* Save all mpidr */
    rt_hw_sysreg_read(mpidr_el1, rt_cpu_mpidr_table[cpu_id]);

    /* Switch this core onto the shared kernel translation table. */
    rt_hw_mmu_ktbl_set((unsigned long)MMUTable);

#ifdef RT_USING_PIC
    rt_pic_irq_init();
#else
    /* initialize vector table */
    rt_hw_vector_init();

    /* Per-CPU GIC interface (and redistributor on GICv3). */
    arm_gic_cpu_init(0, 0);
#ifdef BSP_USING_GICV3
    arm_gic_redist_init(0, 0);
#endif /* BSP_USING_GICV3 */
#endif

#ifndef RT_CLOCK_TIME_ARM_ARCH
    /* initialize timer for os tick */
    rt_hw_gtimer_local_enable();
#endif /* !RT_CLOCK_TIME_ARM_ARCH */

    rt_dm_secondary_cpu_init();

    /* Accept inter-processor interrupts on this core. */
    rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
    rt_hw_interrupt_umask(RT_STOP_IPI);
    rt_hw_interrupt_umask(RT_SMP_CALL_IPI);

    LOG_I("Call cpu %d on %s", cpu_id, "success");

#if defined(RT_USING_CLOCK_TIME) && defined(RT_USING_DM)
    /* Calibrate this core's busy-wait delay, but only if the generic
     * calibrated delay (and not a BSP hook) is installed. */
    if (rt_clock_timer_us_delay == &cpu_us_delay)
    {
        cpu_loops_per_tick_init();
    }
#endif

    rt_system_scheduler_start();
}
/* Idle behavior for a secondary core: wait-for-event until woken.
 * Declared rt_weak so a BSP can override it. */
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
    rt_hw_wfe();
}
#endif
/* Kernel console output: forward the string to the earlycon device that
 * was discovered from the device tree. */
void rt_hw_console_output(const char *str)
{
    rt_fdt_earlycon_output(str);
}