[smart/aarch64] code sync (#6750)

* [smart/aarch64] sync aarch64
Author: Shell
Date: 2022-12-20 17:49:37 +08:00
Committed by: GitHub
Parent: f0ef8ada33
Commit: e8504c7cf1
114 changed files with 6099 additions and 9092 deletions

View File

@@ -82,6 +82,7 @@ config ARCH_ARM_MMU
if RT_USING_SMART
config KERNEL_VADDR_START
hex "The virtural address of kernel start"
default 0xffff000000000000 if ARCH_ARMV8
default 0xc0000000 if ARCH_ARM
default 0x80000000 if ARCH_RISCV
depends on ARCH_MM_MMU
@@ -159,6 +160,7 @@ config RT_BACKTRACE_FUNCTION_NAME
config ARCH_ARMV8
bool
select ARCH_ARM
select ARCH_ARM_MMU
config ARCH_MIPS
bool

View File

@@ -1,67 +1,64 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
* 2021-12-28 GuEe-GUI add fpu support
*/
#ifndef __ARMV8_H__
#define __ARMV8_H__
#include <rtdef.h>
/* the exception stack without VFP registers */
struct rt_hw_exp_stack
{
unsigned long long pc;
unsigned long long spsr;
unsigned long long x30;
unsigned long long xzr;
unsigned long long fpcr;
unsigned long long fpsr;
unsigned long long x28;
unsigned long long x29;
unsigned long long x26;
unsigned long long x27;
unsigned long long x24;
unsigned long long x25;
unsigned long long x22;
unsigned long long x23;
unsigned long long x20;
unsigned long long x21;
unsigned long long x18;
unsigned long long x19;
unsigned long long x16;
unsigned long long x17;
unsigned long long x14;
unsigned long long x15;
unsigned long long x12;
unsigned long long x13;
unsigned long long x10;
unsigned long long x11;
unsigned long long x8;
unsigned long long x9;
unsigned long long x6;
unsigned long long x7;
unsigned long long x4;
unsigned long long x5;
unsigned long long x2;
unsigned long long x3;
unsigned long long x0;
unsigned long long x1;
unsigned long pc;
unsigned long cpsr;
unsigned long sp_el0;
unsigned long x30;
unsigned long fpcr;
unsigned long fpsr;
unsigned long x28;
unsigned long x29;
unsigned long x26;
unsigned long x27;
unsigned long x24;
unsigned long x25;
unsigned long x22;
unsigned long x23;
unsigned long x20;
unsigned long x21;
unsigned long x18;
unsigned long x19;
unsigned long x16;
unsigned long x17;
unsigned long x14;
unsigned long x15;
unsigned long x12;
unsigned long x13;
unsigned long x10;
unsigned long x11;
unsigned long x8;
unsigned long x9;
unsigned long x6;
unsigned long x7;
unsigned long x4;
unsigned long x5;
unsigned long x2;
unsigned long x3;
unsigned long x0;
unsigned long x1;
unsigned long long fpu[16];
};
#define SP_ELx ( ( unsigned long long ) 0x01 )
#define SP_EL0 ( ( unsigned long long ) 0x00 )
#define PSTATE_EL1 ( ( unsigned long long ) 0x04 )
#define PSTATE_EL2 ( ( unsigned long long ) 0x08 )
#define PSTATE_EL3 ( ( unsigned long long ) 0x0c )
#define SP_ELx ((unsigned long)0x01)
#define SP_EL0 ((unsigned long)0x00)
#define PSTATE_EL1 ((unsigned long)0x04)
#define PSTATE_EL2 ((unsigned long)0x08)
#define PSTATE_EL3 ((unsigned long)0x0c)
rt_ubase_t rt_hw_get_current_el(void);
void rt_hw_set_elx_env(void);
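A usage note (our annotation, not part of the commit): the SP_* and PSTATE_* constants above are meant to be OR-ed into a saved SPSR/PSTATE image, e.g. when fabricating the initial context of a kernel thread that runs at EL1 on its own stack pointer (EL1h):

/* illustration: initial PSTATE for an EL1 thread using SP_EL1 */
unsigned long spsr = PSTATE_EL1 | SP_ELx; /* 0x04 | 0x01 == 0x05, i.e. EL1h */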

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-05-18 Jesven the first version
*/
.macro SAVE_FPU, reg
STR Q0, [\reg, #-0x10]!
STR Q1, [\reg, #-0x10]!
STR Q2, [\reg, #-0x10]!
STR Q3, [\reg, #-0x10]!
STR Q4, [\reg, #-0x10]!
STR Q5, [\reg, #-0x10]!
STR Q6, [\reg, #-0x10]!
STR Q7, [\reg, #-0x10]!
STR Q8, [\reg, #-0x10]!
STR Q9, [\reg, #-0x10]!
STR Q10, [\reg, #-0x10]!
STR Q11, [\reg, #-0x10]!
STR Q12, [\reg, #-0x10]!
STR Q13, [\reg, #-0x10]!
STR Q14, [\reg, #-0x10]!
STR Q15, [\reg, #-0x10]!
.endm
.macro RESTORE_FPU, reg
LDR Q15, [\reg], #0x10
LDR Q14, [\reg], #0x10
LDR Q13, [\reg], #0x10
LDR Q12, [\reg], #0x10
LDR Q11, [\reg], #0x10
LDR Q10, [\reg], #0x10
LDR Q9, [\reg], #0x10
LDR Q8, [\reg], #0x10
LDR Q7, [\reg], #0x10
LDR Q6, [\reg], #0x10
LDR Q5, [\reg], #0x10
LDR Q4, [\reg], #0x10
LDR Q3, [\reg], #0x10
LDR Q2, [\reg], #0x10
LDR Q1, [\reg], #0x10
LDR Q0, [\reg], #0x10
.endm

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-06-02 Jesven the first version
*/
#include <rtthread.h>
#include <backtrace.h>
#define BT_NESTING_MAX 100
static int unwind_frame(struct bt_frame *frame)
{
unsigned long fp = frame->fp;
if ((fp & 0x7)
#ifdef RT_USING_LWP
|| fp < KERNEL_VADDR_START
#endif
)
{
return 1;
}
frame->fp = *(unsigned long *)fp;
frame->pc = *(unsigned long *)(fp + 8);
return 0;
}
static void walk_unwind(unsigned long pc, unsigned long fp)
{
struct bt_frame frame;
unsigned long lr = pc;
int nesting = 0;
frame.fp = fp;
while (nesting < BT_NESTING_MAX)
{
rt_kprintf(" %p", (void *)lr);
if (unwind_frame(&frame))
{
break;
}
lr = frame.pc;
nesting++;
}
}
void backtrace(unsigned long pc, unsigned long lr, unsigned long fp)
{
rt_kprintf("please use: addr2line -e rtthread.elf -a -f %p", (void *)pc);
walk_unwind(lr, fp);
rt_kprintf("\n");
}
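A note on why fp and fp + 8 (our annotation): unwind_frame assumes the AAPCS64 frame record that x29 points at, a two-word pair saved by every function that maintains a frame pointer:

/* AAPCS64 frame record addressed by x29 (fp):
 *   fp + 0 : caller's saved x29 -> the next frame record up the stack
 *   fp + 8 : saved x30 -> return address, which becomes frame->pc */
struct frame_record
{
    unsigned long fp;
    unsigned long lr;
};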

View File

@@ -0,0 +1,22 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-06-02 Jesven the first version
*/
#ifndef __BACKTRACE_H__
#define __BACKTRACE_H__
struct bt_frame
{
unsigned long fp;
unsigned long pc;
};
void backtrace(unsigned long pc, unsigned long lr, unsigned long fp);
#endif /*__BACKTRACE_H__*/

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -127,7 +127,8 @@ __asm_flush_dcache_range:
/* x2 <- minimal cache line size in cache system */
sub x3, x2, #1
bic x0, x0, x3
1: dc civac, x0 /* clean & invalidate data or unified cache */
1: dc civac, x0 /* clean & invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b

View File

@@ -0,0 +1,23 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-12-18 RT-Thread the first version
*/
#ifndef __CACHE_H__
#define __CACHE_H__
void rt_hw_dcache_flush_all(void);
void rt_hw_dcache_invalidate_all(void);
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
void rt_hw_cpu_dcache_clean(void *addr, int size);
void rt_hw_cpu_dcache_invalidate(unsigned long start_addr,unsigned long size);
void rt_hw_icache_invalidate_all();
void rt_hw_icache_invalidate_range(unsigned long start_addr, int size);
#endif /* __CACHE_H__ */

View File

@@ -1,59 +1,58 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-12-28 GuEe-GUI the first version
* 2019-03-29 quanzhao the first version
*/
#include <rthw.h>
#include <rtdef.h>
void __asm_invalidate_icache_all(void);
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(unsigned long start, unsigned long end);
void __asm_invalidate_dcache_range(unsigned long start, unsigned long end);
void __asm_invalidate_icache_all(void);
void __asm_invalidate_icache_range(unsigned long start, unsigned long end);
void __asm_invalidate_dcache_all(void);
void __asm_invalidate_icache_all(void);
void rt_hw_dcache_flush_all(void)
rt_inline rt_uint32_t rt_cpu_icache_line_size(void)
{
__asm_flush_dcache_all();
return 0;
}
void rt_hw_dcache_invalidate_all(void)
rt_inline rt_uint32_t rt_cpu_dcache_line_size(void)
{
__asm_invalidate_dcache_all();
return 0;
}
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size)
void rt_hw_cpu_icache_invalidate(void *addr, int size)
{
__asm_flush_dcache_range(start_addr, start_addr + size);
__asm_invalidate_icache_range((unsigned long)addr, (unsigned long)addr + size);
}
void rt_hw_dcache_invalidate_range(unsigned long start_addr,unsigned long size)
void rt_hw_cpu_dcache_invalidate(void *addr, int size)
{
__asm_invalidate_dcache_range(start_addr, start_addr + size);
__asm_invalidate_dcache_range((unsigned long)addr, (unsigned long)addr + size);
}
void rt_hw_icache_invalidate_all()
void rt_hw_cpu_dcache_clean(void *addr, int size)
{
__asm_invalidate_icache_all();
__asm_flush_dcache_range((unsigned long)addr, (unsigned long)addr + size);
}
void rt_hw_icache_invalidate_range(unsigned long start_addr, int size)
void rt_hw_cpu_dcache_clean_and_invalidate(void *addr, int size)
{
__asm_invalidate_icache_range(start_addr, start_addr + size);
__asm_flush_dcache_range((unsigned long)addr, (unsigned long)addr + size);
}
void rt_hw_cpu_icache_ops(int ops, void *addr, int size)
{
if (ops == RT_HW_CACHE_INVALIDATE)
{
rt_hw_icache_invalidate_range((unsigned long)addr, size);
rt_hw_cpu_icache_invalidate(addr, size);
}
}
@@ -61,10 +60,43 @@ void rt_hw_cpu_dcache_ops(int ops, void *addr, int size)
{
if (ops == RT_HW_CACHE_FLUSH)
{
rt_hw_dcache_flush_range((unsigned long)addr, size);
rt_hw_cpu_dcache_clean(addr, size);
}
else if (ops == RT_HW_CACHE_INVALIDATE)
{
rt_hw_dcache_invalidate_range((unsigned long)addr, size);
rt_hw_cpu_dcache_invalidate(addr, size);
}
}
rt_base_t rt_hw_cpu_icache_status(void)
{
return 0;
}
rt_base_t rt_hw_cpu_dcache_status(void)
{
return 0;
}
#ifdef RT_USING_LWP
#define ICACHE (1<<0)
#define DCACHE (1<<1)
#define BCACHE (ICACHE|DCACHE)
int sys_cacheflush(void *addr, int size, int cache)
{
if ((size_t)addr < KERNEL_VADDR_START && (size_t)addr + size <= KERNEL_VADDR_START)
{
if ((cache & DCACHE) != 0)
{
rt_hw_cpu_dcache_clean_and_invalidate(addr, size);
}
if ((cache & ICACHE) != 0)
{
rt_hw_cpu_icache_invalidate(addr, size);
}
return 0;
}
return -1;
}
#endif
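sys_cacheflush performs the standard publish sequence for freshly written code: clean the data cache so the new bytes reach the point of unification, then invalidate the instruction cache. A hedged user-side sketch (the flag values mirror the kernel-side defines above; the helper name is ours):

#define ICACHE (1<<0)
#define DCACHE (1<<1)

extern int sys_cacheflush(void *addr, int size, int cache);

/* after generating code into 'buf', make it safe to execute */
static int publish_code(void *buf, int size)
{
    return sys_cacheflush(buf, size, ICACHE | DCACHE);
}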

File diff suppressed because it is too large

View File

@@ -0,0 +1,65 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2011-09-15 Bernard first version
*/
#ifndef __CP15_H__
#define __CP15_H__
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#define __WFI() __asm__ volatile ("wfi":::"memory")
#define __WFE() __asm__ volatile ("wfe":::"memory")
#define __SEV() __asm__ volatile ("sev")
__STATIC_FORCEINLINE void __ISB(void)
{
__asm__ volatile ("isb 0xF":::"memory");
}
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
__STATIC_FORCEINLINE void __DSB(void)
{
__asm__ volatile ("dsb 0xF":::"memory");
}
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
__STATIC_FORCEINLINE void __DMB(void)
{
__asm__ volatile ("dmb 0xF":::"memory");
}
unsigned long rt_cpu_get_smp_id(void);
void rt_cpu_mmu_disable(void);
void rt_cpu_mmu_enable(void);
void rt_cpu_tlb_set(volatile unsigned long*);
void rt_cpu_dcache_clean_flush(void);
void rt_cpu_icache_flush(void);
void rt_cpu_vector_set_base(rt_ubase_t addr);
void rt_hw_mmu_init(void);
void rt_hw_vector_init(void);
void set_timer_counter(unsigned int counter);
void set_timer_control(unsigned int control);
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -7,15 +7,43 @@
* Date Author Notes
* 2011-09-15 Bernard first version
* 2019-07-28 zdzn add smp support
* 2021-12-21 GuEe-GUI set tpidr_el1 as multiprocessor id instead of mpidr_el1
* 2021-12-28 GuEe-GUI add spinlock for aarch64
*/
#include <rthw.h>
#include <rtthread.h>
#include <cpuport.h>
#include <board.h>
#include "cp15.h"
#define DBG_TAG "libcpu.aarch64.cpu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <string.h>
#include "cpu.h"
#include "psci_api.h"
void (*system_off)(void);
#ifdef RT_USING_SMP
#ifdef RT_USING_FDT
#include "dtb_node.h"
struct dtb_node *_cpu_node[RT_CPUS_NR];
#endif /* RT_USING_FDT */
#define MPIDR_AFF_MASK 0x000000FF00FFFFFFul
#define REPORT_ERR(retval) LOG_E("got error code %d in %s(), %s:%d", (retval), __func__, __FILE__, __LINE__)
#define CHECK_RETVAL(retval) if (retval) {REPORT_ERR(retval);}
/**
* cpu_ops_tbl contains the cpu_ops_t for each cpu the kernel has observed;
* given a cpu's logical id 'i', its cpu_ops_t is 'cpu_ops_tbl[i]'
*/
struct cpu_ops_t *cpu_ops_tbl[RT_CPUS_NR];
#ifdef RT_USING_SMART
// _id_to_mpidr is a table that translates a logical id to an mpid, which is a 64-bit value
rt_uint64_t rt_cpu_mpidr_early[RT_CPUS_NR] rt_weak = {[0 ... RT_CPUS_NR - 1] = ID_ERROR};
#else
/* The more common mpidr_el1 table; redefine it in the BSP if the mapping differs */
rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
{
@@ -29,65 +57,290 @@ rt_weak rt_uint64_t rt_cpu_mpidr_early[] =
[7] = 0x80000007,
[RT_CPUS_NR] = 0
};
#endif
#endif /* RT_USING_SMART */
int rt_hw_cpu_id(void)
{
rt_base_t value;
__asm__ volatile ("mrs %0, tpidr_el1":"=r"(value));
return value;
}
#ifdef RT_USING_SMP
void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
{
lock->slock = 0;
}
#define TICKET_SHIFT 16
void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
{
rt_hw_spinlock_t lock_val, new_lockval;
unsigned int tmp;
struct __arch_tickets lockval, newval;
__asm__ volatile (
/* Increment the next ticket. */
asm volatile(
/* Atomically increment the next ticket. */
" prfm pstl1strm, %3\n"
"1: ldaxr %w0, %3\n"
" add %w1, %w0, %w5\n"
" stxr %w2, %w1, %3\n"
" cbnz %w2, 1b\n"
/* Check whether we get the lock */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
/* Did we get the lock? */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
/*
* Didn't get lock and spin on the owner.
* Should send a local event to avoid missing an
* No: spin on the owner. Send a local event to avoid missing an
* unlock before the exclusive load.
*/
" sevl\n"
"2: wfe\n"
" ldaxrh %w2, %4\n"
" eor %w1, %w2, %w0, lsr #16\n"
" eor %w1, %w2, %w0, lsr #16\n"
" cbnz %w1, 2b\n"
/* got the lock. */
/* We got the lock. Critical section starts here. */
"3:"
: "=&r" (lock_val), "=&r" (new_lockval), "=&r" (tmp), "+Q" (*lock)
: "Q" (lock->tickets.owner), "I" (1 << 16)
: "=&r"(lockval), "=&r"(newval), "=&r"(tmp), "+Q"(*lock)
: "Q"(lock->tickets.owner), "I"(1 << TICKET_SHIFT)
: "memory");
__DMB();
rt_hw_dmb();
}
void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
{
__DMB();
__asm__ volatile (
"stlrh %w1, %0\n"
: "=Q" (lock->tickets.owner)
: "r" (lock->tickets.owner + 1)
rt_hw_dmb();
asm volatile(
" stlrh %w1, %0\n"
: "=Q"(lock->tickets.owner)
: "r"(lock->tickets.owner + 1)
: "memory");
}
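For comparison (our sketch, not part of the commit), the same ticket-lock algorithm in portable C11 atomics; the two half-words mirror the little-endian slock layout the assembly relies on, with the owner in the low half and the next ticket in the high half:

#include <stdatomic.h>
#include <stdint.h>

typedef struct
{
    _Atomic uint16_t owner; /* low half of slock: whose turn it is */
    _Atomic uint16_t next;  /* high half: next ticket to hand out */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
    /* take a ticket: corresponds to the ldaxr/add/stxr loop above */
    uint16_t ticket = atomic_fetch_add_explicit(&l->next, 1, memory_order_relaxed);
    while (atomic_load_explicit(&l->owner, memory_order_acquire) != ticket)
        ; /* spin until it is our turn (the sevl/wfe/ldaxrh loop above) */
}

static void ticket_unlock(ticket_lock_t *l)
{
    /* only the holder writes owner, so load + store-release suffices (stlrh above) */
    uint16_t o = atomic_load_explicit(&l->owner, memory_order_relaxed);
    atomic_store_explicit(&l->owner, (uint16_t)(o + 1), memory_order_release);
}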
static int _cpus_init_data_hardcoded(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
// load cpu_hw_ids into cpuid_to_hwid,
// and cpu_ops into cpu_ops_tbl
if (num_cpus > RT_CPUS_NR)
{
LOG_W("num_cpus (%d) greater than RT_CPUS_NR (%d)\n", num_cpus, RT_CPUS_NR);
num_cpus = RT_CPUS_NR;
}
for (int i = 0; i < num_cpus; i++)
{
set_hwid(i, cpu_hw_ids[i]);
cpu_ops_tbl[i] = cpu_ops[i];
}
return 0;
}
#ifdef RT_USING_FDT
/** read a ('size' * 4)-byte big-endian number starting at 'start' */
static rt_uint64_t _read_be_number(void *start, int size)
{
    rt_uint64_t buf = 0;
    uint32_t *cell = (uint32_t *)start; /* each FDT cell is a 32-bit big-endian word */
    for (; size > 0; size--)
        buf = (buf << 32) | fdt32_to_cpu(*cell++);
    return buf;
}
/** check the device_type property of the node */
static bool _node_is_cpu(struct dtb_node *node)
{
char *device_type = dtb_node_get_dtb_node_property_value(node, "device_type", NULL);
if (device_type)
{
return !strcmp(device_type, "cpu");
}
return false;
}
static int _read_and_set_hwid(struct dtb_node *cpu, int *id_pool, int *pcpuid)
{
// address_cells/size_cells give how many 32-bit cells each address/size field of 'reg' occupies
int size;
static int address_cells, size_cells;
if (!address_cells && !size_cells)
dtb_node_get_dtb_node_cells(cpu, &address_cells, &size_cells);
void *id_start = dtb_node_get_dtb_node_property_value(cpu, "reg", &size);
rt_uint64_t mpid = _read_be_number(id_start, address_cells);
*pcpuid = *id_pool;
*id_pool = *id_pool + 1;
set_hwid(*pcpuid, mpid);
LOG_I("Using MPID 0x%lx as cpu %d", mpid, *pcpuid);
// setting _cpu_node for cpu_init use
_cpu_node[*pcpuid] = cpu;
return 0;
}
static int _read_and_set_cpuops(struct dtb_node *cpu, int cpuid)
{
char *method = dtb_node_get_dtb_node_property_value(cpu, "enable-method", NULL);
if (!method)
{
LOG_E("Cannot read method from cpu node");
return -1;
}
struct cpu_ops_t *cpu_ops;
if (!strcmp(method, cpu_ops_psci.method))
{
cpu_ops = &cpu_ops_psci;
}
else if (!strcmp(method, cpu_ops_spin_tbl.method))
{
cpu_ops = &cpu_ops_spin_tbl;
}
else
{
cpu_ops = RT_NULL;
LOG_E("Not supported cpu_ops: %s", method);
}
cpu_ops_tbl[cpuid] = cpu_ops;
LOG_D("Using boot method [%s] for cpu %d", cpu_ops->method, cpuid);
return 0;
}
static int _cpus_init_data_fdt()
{
// populate cpuid_to_hwid and cpu_ops_tbl from the FDT
void *root = get_dtb_node_head();
int id_pool = 0;
int cpuid;
struct dtb_node *cpus = dtb_node_get_dtb_node_by_path(root, "/cpus");
// for each cpu node (device-type is cpu), read its mpid and set its cpuid_to_hwid
for_each_node_child(cpus)
{
if (!_node_is_cpu(cpus))
{
continue;
}
if (id_pool >= RT_CPUS_NR)
{
LOG_W("Reading more cpus from FDT than RT_CPUS_NR"
"\n Parsing will not continue and only %d cpus will be used.", RT_CPUS_NR);
break;
}
_read_and_set_hwid(cpus, &id_pool, &cpuid);
_read_and_set_cpuops(cpus, cpuid);
}
return 0;
}
#endif /* RT_USING_FDT */
/** init cpus with hardcoded information or by parsing the FDT */
static int _cpus_init(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
int retval;
// first, set up cpu_ops_tbl and cpuid_to_hwid
if (num_cpus > 0)
retval = _cpus_init_data_hardcoded(num_cpus, cpu_hw_ids, cpu_ops);
else
{
retval = -1;
#ifdef RT_USING_FDT
retval = _cpus_init_data_fdt();
#endif
}
if (retval)
return retval;
// use cpuid_to_hwid and cpu_ops_tbl to call method_init and cpu_init,
// assuming that cpu 0 has already been initialized
for (int i = 1; i < RT_CPUS_NR; i++)
{
if (cpuid_to_hwid(i) == ID_ERROR)
{
LOG_E("Failed to find hardware id of CPU %d", i);
continue;
}
if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_init)
{
retval = cpu_ops_tbl[i]->cpu_init(i);
CHECK_RETVAL(retval);
}
else
{
LOG_E("Failed to find cpu_init for cpu %d with cpu_ops[%p], cpu_ops->cpu_init[%p]"
, cpuid_to_hwid(i), cpu_ops_tbl[i], cpu_ops_tbl[i] ? cpu_ops_tbl[i]->cpu_init : NULL);
}
}
return 0;
}
static void _boot_secondary(void)
{
for (int i = 1; i < RT_CPUS_NR; i++)
{
int retval = -0xbad0; // marks an unsupported operation
if (cpu_ops_tbl[i] && cpu_ops_tbl[i]->cpu_boot)
retval = cpu_ops_tbl[i]->cpu_boot(i);
if (retval)
{
if (retval == -0xbad0)
LOG_E("No cpu_ops was probed for CPU %d. Try to configure it or use fdt", i);
else
LOG_E("Failed to boot secondary CPU %d, error code %d", i, retval);
} else {
LOG_I("Secondary CPU %d booted", i);
}
}
}
rt_weak void rt_hw_secondary_cpu_up(void)
{
_boot_secondary();
}
/**
* @brief boot cpu with hardcoded data
*
* @param num_cpus number of cpus
* @param cpu_hw_ids each element represents a hwid of cpu[i]
* @param cpu_ops each element represents a pointer to cpu_ops of cpu[i]
* @return int 0 on success
*/
int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[])
{
int retval = 0;
if (num_cpus < 1 || !cpu_hw_ids || !cpu_ops)
return -1;
retval = _cpus_init(num_cpus, cpu_hw_ids, cpu_ops);
CHECK_RETVAL(retval);
return retval;
}
#define CPU_INIT_USING_FDT 0,0,0
/**
* @brief Initialize cpu information from fdt
*
* @return int
*/
int rt_hw_cpu_init()
{
#ifdef RT_USING_FDT
return _cpus_init(CPU_INIT_USING_FDT);
#else
LOG_E("CPU init failed since RT_USING_FDT was not defined");
return -0xa; /* no fdt support */
#endif /* RT_USING_FDT */
}
rt_weak void rt_hw_secondary_cpu_idle_exec(void)
{
asm volatile("wfe" ::
: "memory", "cc");
}
#endif /*RT_USING_SMP*/
/**
@@ -98,14 +351,19 @@ void rt_hw_spin_unlock(rt_hw_spinlock_t *lock)
/** shutdown CPU */
rt_weak void rt_hw_cpu_shutdown()
{
register rt_int32_t level;
rt_uint32_t level;
rt_kprintf("shutdown...\n");
if (system_off)
system_off();
LOG_E("system shutdown failed");
level = rt_hw_interrupt_disable();
while (level)
{
RT_ASSERT(0);
}
}
MSH_CMD_EXPORT_ALIAS(rt_hw_cpu_shutdown, shutdown, shutdown machine);
/*@}*/

View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __RT_HW_CPU_H__
#define __RT_HW_CPU_H__
#include <rthw.h>
#include <rtthread.h>
#include <stdbool.h>
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif /* RT_CPUS_NR */
#ifdef RT_USING_SMP
struct cpu_ops_t
{
const char *method;
int (*cpu_init)(rt_uint32_t id);
int (*cpu_boot)(rt_uint32_t id);
void (*cpu_shutdown)(void);
};
/**
* Identifier to mark a wrong CPU MPID.
* All elements in rt_cpu_mpidr_early[] should be initialized with this value
*/
#define ID_ERROR __INT64_MAX__
extern rt_uint64_t rt_cpu_mpidr_early[];
extern struct dtb_node *_cpu_node[];
#define cpuid_to_hwid(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? rt_cpu_mpidr_early[cpuid] : ID_ERROR)
#define set_hwid(cpuid, hwid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (rt_cpu_mpidr_early[cpuid] = (hwid)) : ID_ERROR)
#define get_cpu_node(cpuid) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? _cpu_node[cpuid] : NULL)
#define set_cpu_node(cpuid, node) \
((((cpuid) >= 0) && ((cpuid) < RT_CPUS_NR)) ? (_cpu_node[cpuid] = node) : NULL)
extern int rt_hw_cpu_init();
extern int rt_hw_cpu_boot_secondary(int num_cpus, rt_uint64_t *cpu_hw_ids, struct cpu_ops_t *cpu_ops[]);
extern void rt_hw_secondary_cpu_idle_exec(void);
extern struct cpu_ops_t cpu_ops_psci;
extern struct cpu_ops_t cpu_ops_spin_tbl;
#endif /* RT_USING_SMP */
extern void rt_hw_cpu_shutdown(void);
extern void (*system_off)(void);
#endif /* __RT_HW_CPU_H__ */
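For boards that know their secondary cores at build time, the hardcoded path can be used directly; a sketch (the MPID values and function name are placeholders):

#include "cpu.h"

/* placeholder MPIDs for a hypothetical 4-core cluster; cpu 0 is the boot core */
static rt_uint64_t hw_ids[] = { 0x80000000, 0x80000001, 0x80000002, 0x80000003 };
static struct cpu_ops_t *ops[] = { &cpu_ops_psci, &cpu_ops_psci, &cpu_ops_psci, &cpu_ops_psci };

void board_smp_prepare(void)
{
    /* fills cpuid_to_hwid and cpu_ops_tbl, then runs each cpu_ops->cpu_init;
     * the cores are actually released later by rt_hw_secondary_cpu_up() */
    rt_hw_cpu_boot_secondary(4, hw_ids, ops);
}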

View File

@@ -1,82 +1,103 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
*/
.text
.globl rt_hw_get_current_el
rt_hw_get_current_el:
MRS X0, CurrentEL
CMP X0, 0xc
B.EQ 3f
CMP X0, 0x8
B.EQ 2f
CMP X0, 0x4
B.EQ 1f
LDR X0, =0
B 0f
MRS X0, CurrentEL
CMP X0, 0xc
B.EQ 3f
CMP X0, 0x8
B.EQ 2f
CMP X0, 0x4
B.EQ 1f
LDR X0, =0
B 0f
3:
LDR X0, =3
B 0f
LDR X0, =3
B 0f
2:
LDR X0, =2
B 0f
LDR X0, =2
B 0f
1:
LDR X0, =1
B 0f
LDR X0, =1
B 0f
0:
RET
RET
.globl rt_hw_set_current_vbar
rt_hw_set_current_vbar:
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
3:
MSR VBAR_EL3,X0
B 0f
MSR VBAR_EL3,X0
B 0f
2:
MSR VBAR_EL2,X0
B 0f
MSR VBAR_EL2,X0
B 0f
1:
MSR VBAR_EL1,X0
B 0f
MSR VBAR_EL1,X0
B 0f
0:
RET
RET
.globl rt_hw_set_elx_env
rt_hw_set_elx_env:
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
MRS X1, CurrentEL
CMP X1, 0xc
B.EQ 3f
CMP X1, 0x8
B.EQ 2f
CMP X1, 0x4
B.EQ 1f
B 0f
3:
MRS X0, SCR_EL3
ORR X0, X0, #0xF /* SCR_EL3.NS|IRQ|FIQ|EA */
MSR SCR_EL3, X0
B 0f
MRS X0, SCR_EL3
ORR X0, X0, #0xF /* SCR_EL3.NS|IRQ|FIQ|EA */
MSR SCR_EL3, X0
B 0f
2:
MRS X0, HCR_EL2
ORR X0, X0, #0x38
MSR HCR_EL2, X0
B 0f
MRS X0, HCR_EL2
ORR X0, X0, #0x38
MSR HCR_EL2, X0
B 0f
1:
B 0f
B 0f
0:
RET
.global rt_cpu_vector_set_base
rt_cpu_vector_set_base:
MSR VBAR_EL1,X0
RET
/**
* unsigned long rt_hw_ffz(unsigned long x)
*/
.global rt_hw_ffz
rt_hw_ffz:
mvn x1, x0
clz x0, x1
mov x1, #0x3f
sub x0, x1, x0
ret
.global rt_hw_clz
rt_hw_clz:
clz x0, x0
ret

View File

@@ -0,0 +1,21 @@
#ifndef __CPU_OPS_COMMON_H__
#define __CPU_OPS_COMMON_H__
#include <rthw.h>
#include <rtthread.h>
#include <mmu.h>
#include "entry_point.h"
static inline rt_uint64_t get_secondary_entry_pa(void)
{
rt_uint64_t secondary_entry_pa = (rt_uint64_t)rt_hw_mmu_v2p(&mmu_info, _secondary_cpu_entry);
if (!secondary_entry_pa)
{
LOG_E("Failed to translate 'secondary_entry_pa' to physical address");
return 0;
}
return secondary_entry_pa;
}
#endif /* __CPU_OPS_COMMON_H__ */

View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#ifdef RT_USING_SMP
#define DBG_TAG "libcpu.aarch64.cpu_psci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "cpu_ops_common.h"
#include "cpu.h"
#include "errno.h"
#include "psci.h"
#include "psci_api.h"
static int (*_psci_init)(void) = psci_init;
static int __call_method_init()
{
int (*init)(void) = _psci_init;
_psci_init = RT_NULL;
return init();
}
/** return 0 on success, otherwise failed */
#define _call_method_init() ((_psci_init) ? __call_method_init() : 0)
static int cpu_psci_cpu_init(rt_uint32_t cpuid)
{
// init psci only once
return _call_method_init();
}
static int cpu_psci_cpu_boot(rt_uint32_t cpuid)
{
rt_uint64_t secondary_entry_pa = get_secondary_entry_pa();
if (!secondary_entry_pa)
return -1;
if (!psci_ops.cpu_on) {
LOG_E("Uninitialized psci operation");
return -1;
}
return psci_ops.cpu_on(cpuid_to_hwid(cpuid), secondary_entry_pa);
}
static void cpu_psci_cpu_shutdown()
{
psci_ops.cpu_off(cpuid_to_hwid(rt_hw_cpu_id()));
}
struct cpu_ops_t cpu_ops_psci = {
.method = "psci",
.cpu_boot = cpu_psci_cpu_boot,
.cpu_init = cpu_psci_cpu_init,
.cpu_shutdown = cpu_psci_cpu_shutdown,
};
#endif /* RT_USING_SMP */

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#include <rthw.h>
#include <rtthread.h>
#include "cpu.h"
#ifdef RT_USING_SMART
#include <ioremap.h>
#else
#define rt_ioremap(x, ...) (x)
#define rt_iounmap(x)
#endif
#define DBG_TAG "libcpu.aarch64.cpu_spin_table"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include "cpu_ops_common.h"
#ifdef RT_USING_SMP
#ifdef RT_USING_FDT
#include <dtb_node.h>
static rt_uint64_t cpu_release_addr[RT_CPUS_NR];
static int spin_table_cpu_init(rt_uint32_t cpuid)
{
struct dtb_node *cpu = get_cpu_node(cpuid);
if (!cpu)
return -1; /* uninitialized cpu node in fdt */
int size;
rt_uint64_t *phead = (rt_uint64_t*)dtb_node_get_dtb_node_property_value(cpu, "cpu-release-addr", &size);
cpu_release_addr[cpuid] = fdt64_to_cpu(*phead);
LOG_D("Using release address 0x%p for CPU %d", cpu_release_addr[cpuid], cpuid);
return 0;
}
static int spin_table_cpu_boot(rt_uint32_t cpuid)
{
rt_uint64_t secondary_entry_pa = get_secondary_entry_pa();
if (!secondary_entry_pa)
return -1;
// map release_addr into the kernel's addressable space
void *rel_va = (void *)cpu_release_addr[cpuid];
#ifdef RT_USING_SMART
rel_va = rt_ioremap(rel_va, sizeof(cpu_release_addr[0]));
#endif
if (!rel_va)
{
LOG_E("IO remap failing");
return -1;
}
__asm__ volatile("str %0, [%1]" ::"rZ"(secondary_entry_pa), "r"(rel_va));
__asm__ volatile("dsb sy");
__asm__ volatile("sev");
rt_iounmap(rel_va);
return 0;
}
#endif /* RT_USING_FDT */
struct cpu_ops_t cpu_ops_spin_tbl = {
.method = "spin-table",
#ifdef RT_USING_FDT
.cpu_init = spin_table_cpu_init,
.cpu_boot = spin_table_cpu_boot,
#endif
};
#endif /* RT_USING_SMP */

View File

@@ -5,20 +5,12 @@
*
* Change Logs:
* Date Author Notes
* 2021-09-10 GuEe-GUI first version
*/
#ifndef __CPUPORT_H__
#define __CPUPORT_H__
#ifndef CPUPORT_H__
#define CPUPORT_H__
#include <rtdef.h>
#define __WFI() __asm__ volatile ("wfi":::"memory")
#define __WFE() __asm__ volatile ("wfe":::"memory")
#define __SEV() __asm__ volatile ("sev")
#define __ISB() __asm__ volatile ("isb 0xf":::"memory")
#define __DSB() __asm__ volatile ("dsb 0xf":::"memory")
#define __DMB() __asm__ volatile ("dmb 0xf":::"memory")
#include <armv8.h>
#ifdef RT_USING_SMP
typedef union {
@@ -32,17 +24,17 @@ typedef union {
rt_inline void rt_hw_isb(void)
{
__asm__ volatile ("isb":::"memory");
asm volatile ("isb":::"memory");
}
rt_inline void rt_hw_dmb(void)
{
__asm__ volatile ("dmb sy":::"memory");
asm volatile ("dmb sy":::"memory");
}
rt_inline void rt_hw_dsb(void)
{
__asm__ volatile ("dsb sy":::"memory");
asm volatile ("dsb sy":::"memory");
}
#endif /* __CPUPORT_H__ */
#endif /*CPUPORT_H__*/

View File

@@ -0,0 +1,13 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __ENTRY_POINT_H__
#define __ENTRY_POINT_H__
extern void _secondary_cpu_entry(void);
#endif /* __ENTRY_POINT_H__ */

View File

@@ -0,0 +1,232 @@
#include "rtthread.h"
static void data_abort(unsigned long far, unsigned long iss)
{
rt_kprintf("fault addr = 0x%016lx\n", far);
if (iss & 0x40)
{
rt_kprintf("abort caused by write instruction\n");
}
else
{
rt_kprintf("abort caused by read instruction\n");
}
switch (iss & 0x3f)
{
case 0b000000:
rt_kprintf("Address size fault, zeroth level of translation or translation table base register\n");
break;
case 0b000001:
rt_kprintf("Address size fault, first level\n");
break;
case 0b000010:
rt_kprintf("Address size fault, second level\n");
break;
case 0b000011:
rt_kprintf("Address size fault, third level\n");
break;
case 0b000100:
rt_kprintf("Translation fault, zeroth level\n");
break;
case 0b000101:
rt_kprintf("Translation fault, first level\n");
break;
case 0b000110:
rt_kprintf("Translation fault, second level\n");
break;
case 0b000111:
rt_kprintf("Translation fault, third level\n");
break;
case 0b001001:
rt_kprintf("Access flag fault, first level\n");
break;
case 0b001010:
rt_kprintf("Access flag fault, second level\n");
break;
case 0b001011:
rt_kprintf("Access flag fault, third level\n");
break;
case 0b001101:
rt_kprintf("Permission fault, first level\n");
break;
case 0b001110:
rt_kprintf("Permission fault, second level\n");
break;
case 0b001111:
rt_kprintf("Permission fault, third level\n");
break;
case 0b010000:
rt_kprintf("Synchronous external abort, not on translation table walk\n");
break;
case 0b011000:
rt_kprintf("Synchronous parity or ECC error on memory access, not on translation table walk\n");
break;
case 0b010100:
rt_kprintf("Synchronous external abort on translation table walk, zeroth level\n");
break;
case 0b010101:
rt_kprintf("Synchronous external abort on translation table walk, first level\n");
break;
case 0b010110:
rt_kprintf("Synchronous external abort on translation table walk, second level\n");
break;
case 0b010111:
rt_kprintf("Synchronous external abort on translation table walk, third level\n");
break;
case 0b011100:
rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, zeroth level\n");
break;
case 0b011101:
rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, first level\n");
break;
case 0b011110:
rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, second level\n");
break;
case 0b011111:
rt_kprintf("Synchronous parity or ECC error on memory access on translation table walk, third level\n");
break;
case 0b100001:
rt_kprintf("Alignment fault\n");
break;
case 0b110000:
rt_kprintf("TLB conflict abort\n");
break;
case 0b110100:
rt_kprintf("IMPLEMENTATION DEFINED fault (Lockdown fault)\n");
break;
case 0b110101:
rt_kprintf("IMPLEMENTATION DEFINED fault (Unsupported Exclusive access fault)\n");
break;
case 0b111101:
rt_kprintf("Section Domain Fault, used only for faults reported in the PAR_EL1\n");
break;
case 0b111110:
rt_kprintf("Page Domain Fault, used only for faults reported in the PAR_EL1\n");
break;
default:
rt_kprintf("unknow abort\n");
break;
}
}
void process_exception(unsigned long esr, unsigned long epc)
{
rt_uint8_t ec;
rt_uint32_t iss;
unsigned long fault_addr;
rt_kprintf("\nexception info:\n");
ec = (unsigned char)((esr >> 26) & 0x3fU);
iss = (unsigned int)(esr & 0x00ffffffU);
rt_kprintf("esr.EC :0x%02x\n", ec);
rt_kprintf("esr.IL :0x%02x\n", (unsigned char)((esr >> 25) & 0x01U));
rt_kprintf("esr.ISS:0x%08x\n", iss);
rt_kprintf("epc :0x%016p\n", (void *)epc);
switch (ec)
{
case 0x00:
rt_kprintf("Exceptions with an unknow reason\n");
break;
case 0x01:
rt_kprintf("Exceptions from an WFI or WFE instruction\n");
break;
case 0x03:
rt_kprintf("Exceptions from an MCR or MRC access to CP15 from AArch32\n");
break;
case 0x04:
rt_kprintf("Exceptions from an MCRR or MRRC access to CP15 from AArch32\n");
break;
case 0x05:
rt_kprintf("Exceptions from an MCR or MRC access to CP14 from AArch32\n");
break;
case 0x06:
rt_kprintf("Exceptions from an LDC or STC access to CP14 from AArch32\n");
break;
case 0x07:
rt_kprintf("Exceptions from Access to Advanced SIMD or floating-point registers\n");
break;
case 0x08:
rt_kprintf("Exceptions from an MRC (or VMRS) access to CP10 from AArch32\n");
break;
case 0x0c:
rt_kprintf("Exceptions from an MCRR or MRRC access to CP14 from AArch32\n");
break;
case 0x0e:
rt_kprintf("Exceptions that occur because ther value of PSTATE.IL is 1\n");
break;
case 0x11:
rt_kprintf("SVC call from AArch32 state\n");
break;
case 0x15:
rt_kprintf("SVC call from AArch64 state\n");
break;
case 0x20:
rt_kprintf("Instruction abort from lower exception level\n");
break;
case 0x21:
rt_kprintf("Instruction abort from current exception level\n");
break;
case 0x22:
rt_kprintf("PC alignment fault\n");
break;
case 0x24:
rt_kprintf("Data abort from a lower Exception level\n");
__asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
data_abort(fault_addr, iss);
break;
case 0x25:
rt_kprintf("Data abort\n");
__asm__ volatile("mrs %0, far_el1":"=r"(fault_addr));
data_abort(fault_addr, iss);
break;
default:
rt_kprintf("Other error\n");
break;
}
}
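A worked decode (our annotation) ties the two switches together:

/* example: esr = 0x96000045
 *   ec  = esr >> 26       = 0x25     -> "Data abort" (current exception level)
 *   il  = (esr >> 25) & 1 = 1
 *   iss = esr & 0xffffff  = 0x45
 *       bit 6 (WnR) = 1              -> "abort caused by write instruction"
 *       iss & 0x3f  = 0b000101       -> "Translation fault, first level" */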

View File

@@ -9,7 +9,6 @@
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
* 2022-03-08 GuEe-GUI add BSP bind SPI CPU self support
*/
#include <rthw.h>
@@ -17,17 +16,15 @@
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV2)
#include <gic.h>
#include <cpuport.h>
#include <board.h>
#include "gic.h"
#include "cp15.h"
struct arm_gic
{
rt_uint64_t offset; /* the first interrupt index in the vector table */
rt_uint64_t dist_hw_base; /* the base address of the gic distributor */
rt_uint64_t cpu_hw_base; /* the base address of the gic cpu interface */
rt_uint64_t cpu_hw_base; /* the base address of the gic cpu interface */
};
/* 'ARM_GIC_MAX_NR' is the number of cores */
@@ -35,33 +32,33 @@ static struct arm_gic _gic_table[ARM_GIC_MAX_NR];
/** Macro to access the Generic Interrupt Controller Interface (GICC)
*/
#define GIC_CPU_CTRL(hw_base) HWREG32((hw_base) + 0x00U)
#define GIC_CPU_PRIMASK(hw_base) HWREG32((hw_base) + 0x04U)
#define GIC_CPU_BINPOINT(hw_base) HWREG32((hw_base) + 0x08U)
#define GIC_CPU_INTACK(hw_base) HWREG32((hw_base) + 0x0cU)
#define GIC_CPU_EOI(hw_base) HWREG32((hw_base) + 0x10U)
#define GIC_CPU_RUNNINGPRI(hw_base) HWREG32((hw_base) + 0x14U)
#define GIC_CPU_HIGHPRI(hw_base) HWREG32((hw_base) + 0x18U)
#define GIC_CPU_IIDR(hw_base) HWREG32((hw_base) + 0xFCU)
#define GIC_CPU_CTRL(hw_base) __REG32((hw_base) + 0x00U)
#define GIC_CPU_PRIMASK(hw_base) __REG32((hw_base) + 0x04U)
#define GIC_CPU_BINPOINT(hw_base) __REG32((hw_base) + 0x08U)
#define GIC_CPU_INTACK(hw_base) __REG32((hw_base) + 0x0cU)
#define GIC_CPU_EOI(hw_base) __REG32((hw_base) + 0x10U)
#define GIC_CPU_RUNNINGPRI(hw_base) __REG32((hw_base) + 0x14U)
#define GIC_CPU_HIGHPRI(hw_base) __REG32((hw_base) + 0x18U)
#define GIC_CPU_IIDR(hw_base) __REG32((hw_base) + 0xFCU)
/** Macro to access the Generic Interrupt Controller Distributor (GICD)
*/
#define GIC_DIST_CTRL(hw_base) HWREG32((hw_base) + 0x000U)
#define GIC_DIST_TYPE(hw_base) HWREG32((hw_base) + 0x004U)
#define GIC_DIST_IGROUP(hw_base, n) HWREG32((hw_base) + 0x080U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_SET(hw_base, n) HWREG32((hw_base) + 0x100U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x180U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_SET(hw_base, n) HWREG32((hw_base) + 0x200U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) HWREG32((hw_base) + 0x280U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_SET(hw_base, n) HWREG32((hw_base) + 0x300U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) HWREG32((hw_base) + 0x380U + ((n)/32U) * 4U)
#define GIC_DIST_PRI(hw_base, n) HWREG32((hw_base) + 0x400U + ((n)/4U) * 4U)
#define GIC_DIST_TARGET(hw_base, n) HWREG32((hw_base) + 0x800U + ((n)/4U) * 4U)
#define GIC_DIST_CONFIG(hw_base, n) HWREG32((hw_base) + 0xc00U + ((n)/16U) * 4U)
#define GIC_DIST_SOFTINT(hw_base) HWREG32((hw_base) + 0xf00U)
#define GIC_DIST_CPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf10U + ((n)/4U) * 4U)
#define GIC_DIST_SPENDSGI(hw_base, n) HWREG32((hw_base) + 0xf20U + ((n)/4U) * 4U)
#define GIC_DIST_ICPIDR2(hw_base) HWREG32((hw_base) + 0xfe8U)
#define GIC_DIST_CTRL(hw_base) __REG32((hw_base) + 0x000U)
#define GIC_DIST_TYPE(hw_base) __REG32((hw_base) + 0x004U)
#define GIC_DIST_IGROUP(hw_base, n) __REG32((hw_base) + 0x080U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_SET(hw_base, n) __REG32((hw_base) + 0x100U + ((n)/32U) * 4U)
#define GIC_DIST_ENABLE_CLEAR(hw_base, n) __REG32((hw_base) + 0x180U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_SET(hw_base, n) __REG32((hw_base) + 0x200U + ((n)/32U) * 4U)
#define GIC_DIST_PENDING_CLEAR(hw_base, n) __REG32((hw_base) + 0x280U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_SET(hw_base, n) __REG32((hw_base) + 0x300U + ((n)/32U) * 4U)
#define GIC_DIST_ACTIVE_CLEAR(hw_base, n) __REG32((hw_base) + 0x380U + ((n)/32U) * 4U)
#define GIC_DIST_PRI(hw_base, n) __REG32((hw_base) + 0x400U + ((n)/4U) * 4U)
#define GIC_DIST_TARGET(hw_base, n) __REG32((hw_base) + 0x800U + ((n)/4U) * 4U)
#define GIC_DIST_CONFIG(hw_base, n) __REG32((hw_base) + 0xc00U + ((n)/16U) * 4U)
#define GIC_DIST_SOFTINT(hw_base) __REG32((hw_base) + 0xf00U)
#define GIC_DIST_CPENDSGI(hw_base, n) __REG32((hw_base) + 0xf10U + ((n)/4U) * 4U)
#define GIC_DIST_SPENDSGI(hw_base, n) __REG32((hw_base) + 0xf20U + ((n)/4U) * 4U)
#define GIC_DIST_ICPIDR2(hw_base) __REG32((hw_base) + 0xfe8U)
static unsigned int _gic_max_irq;
@@ -184,7 +181,7 @@ void arm_gic_clear_pending_irq(rt_uint64_t index, int irq)
}
}
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config)
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config)
{
rt_uint64_t icfgr;
rt_uint64_t shift;
@@ -328,8 +325,6 @@ void arm_gic_send_sgi(rt_uint64_t index, int irq, rt_uint64_t target_list, rt_ui
GIC_DIST_SOFTINT(_gic_table[index].dist_hw_base) =
((filter_list & 0x3U) << 24U) | ((target_list & 0xFFUL) << 16U) | (irq & 0x0FUL);
__DSB();
}
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index)
@@ -348,8 +343,8 @@ rt_uint64_t arm_gic_get_interface_id(rt_uint64_t index)
void arm_gic_set_group(rt_uint64_t index, int irq, rt_uint64_t group)
{
rt_uint32_t igroupr;
rt_uint32_t shift;
uint32_t igroupr;
uint32_t shift;
RT_ASSERT(index < ARM_GIC_MAX_NR);
RT_ASSERT(group <= 1U);
@@ -380,10 +375,6 @@ int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
unsigned int gic_type, i;
rt_uint64_t cpumask = 1U << 0U;
#ifdef ARM_SPI_BIND_CPU_ID
cpumask = 1U << ARM_SPI_BIND_CPU_ID;
#endif
RT_ASSERT(index < ARM_GIC_MAX_NR);
_gic_table[index].dist_hw_base = dist_base;
@@ -438,6 +429,12 @@ int arm_gic_dist_init(rt_uint64_t index, rt_uint64_t dist_base, int irq_start)
}
/* All interrupts defaults to IGROUP1(IRQ). */
/*
for (i = 0; i < _gic_max_irq; i += 32)
{
GIC_DIST_IGROUP(dist_base, i) = 0xffffffffU;
}
*/
for (i = 0U; i < _gic_max_irq; i += 32U)
{
GIC_DIST_IGROUP(dist_base, i) = 0U;
@@ -489,17 +486,23 @@ void arm_gic_dump(rt_uint64_t index)
rt_kprintf("--- hw mask ---\n");
for (i = 0U; i < _gic_max_irq / 32U; i++)
{
rt_kprintf("0x%08x, ", GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base, i * 32U));
rt_kprintf("0x%08x, ",
GIC_DIST_ENABLE_SET(_gic_table[index].dist_hw_base,
i * 32U));
}
rt_kprintf("\n--- hw pending ---\n");
for (i = 0U; i < _gic_max_irq / 32U; i++)
{
rt_kprintf("0x%08x, ", GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base, i * 32U));
rt_kprintf("0x%08x, ",
GIC_DIST_PENDING_SET(_gic_table[index].dist_hw_base,
i * 32U));
}
rt_kprintf("\n--- hw active ---\n");
for (i = 0U; i < _gic_max_irq / 32U; i++)
{
rt_kprintf("0x%08x, ", GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base, i * 32U));
rt_kprintf("0x%08x, ",
GIC_DIST_ACTIVE_SET(_gic_table[index].dist_hw_base,
i * 32U));
}
rt_kprintf("\n");
}

View File

@@ -11,9 +11,8 @@
#ifndef __GIC_H__
#define __GIC_H__
#include <rtdef.h>
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV2)
#include <rthw.h>
#include <board.h>
int arm_gic_get_active_irq(rt_uint64_t index);
void arm_gic_ack(rt_uint64_t index, int irq);
@@ -25,7 +24,7 @@ rt_uint64_t arm_gic_get_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_pending_irq(rt_uint64_t index, int irq);
void arm_gic_clear_pending_irq(rt_uint64_t index, int irq);
void arm_gic_set_configuration(rt_uint64_t index, int irq, rt_uint32_t config);
void arm_gic_set_configuration(rt_uint64_t index, int irq, uint32_t config);
rt_uint64_t arm_gic_get_configuration(rt_uint64_t index, int irq);
void arm_gic_clear_active(rt_uint64_t index, int irq);
@@ -59,7 +58,5 @@ int arm_gic_cpu_init(rt_uint64_t index, rt_uint64_t cpu_base);
void arm_gic_dump_type(rt_uint64_t index);
void arm_gic_dump(rt_uint64_t index);
#endif /* defined(BSP_USING_GIC) && defined(BSP_USING_GICV2) */
#endif

View File

@@ -27,7 +27,7 @@
#if defined(BSP_USING_GIC) && defined(BSP_USING_GICV3)
#include <gicv3.h>
#include <cpuport.h>
#include <cp15.h>
#include <board.h>
@@ -479,7 +479,7 @@ rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq)
}
#ifdef RT_USING_SMP
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint64_t cpu_masks[], rt_uint64_t routing_mode)
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode)
{
const int cpu_mask_cpu_max_nr = sizeof(cpu_masks[0]) * 8;
rt_uint64_t int_id = (irq & 0xf) << 24;
@@ -731,9 +731,22 @@ int arm_gic_redist_init(rt_uint64_t index, rt_uint64_t redist_base)
{
int i;
int cpu_id = rt_hw_cpu_id();
static int master_cpu_id = -1;
RT_ASSERT(index < ARM_GIC_MAX_NR);
if (master_cpu_id < 0)
{
master_cpu_id = cpu_id;
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, &master_cpu_id, sizeof(master_cpu_id));
}
if (!_gic_table[index].redist_hw_base[master_cpu_id])
{
_gic_table[index].redist_hw_base[master_cpu_id] = redist_base;
}
redist_base = _gic_table[index].redist_hw_base[master_cpu_id];
redist_base += cpu_id * (2 << 16);
_gic_table[index].redist_hw_base[cpu_id] = redist_base;
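The cpu_id * (2 << 16) stride reflects the GICv3 memory map: each redistributor owns two contiguous 64 KiB frames (RD_base followed by SGI_base), i.e. 128 KiB per CPU when no VLPI frames are present. A small sketch of the arithmetic (the region base is a placeholder):

#define GICR_STRIDE (2 << 16) /* RD_base + SGI_base, 64 KiB each */

/* e.g. with a redistributor region at 0x080a0000:
 * cpu 0 -> 0x080a0000, cpu 1 -> 0x080c0000, cpu 2 -> 0x080e0000 */
static rt_uint64_t gicr_frame(rt_uint64_t region_base, int cpu_id)
{
    return region_base + (rt_uint64_t)cpu_id * GICR_STRIDE;
}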

View File

@@ -51,7 +51,7 @@ rt_uint64_t arm_gic_get_binary_point(rt_uint64_t index);
rt_uint64_t arm_gic_get_irq_status(rt_uint64_t index, int irq);
#ifdef RT_USING_SMP
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint64_t cpu_masks[], rt_uint64_t routing_mode);
void arm_gic_send_affinity_sgi(rt_uint64_t index, int irq, rt_uint32_t cpu_masks[], rt_uint64_t routing_mode);
#endif
rt_uint64_t arm_gic_get_high_pending_irq(rt_uint64_t index);

View File

@@ -26,9 +26,9 @@ static void rt_hw_timer_isr(int vector, void *parameter)
void rt_hw_gtimer_init(void)
{
rt_hw_interrupt_install(EL1_PHY_TIMER_IRQ_NUM, rt_hw_timer_isr, RT_NULL, "tick");
__ISB();
rt_hw_isb();
timer_step = rt_hw_get_gtimer_frq();
__DSB();
rt_hw_dsb();
timer_step /= RT_TICK_PER_SECOND;
rt_hw_gtimer_local_enable();
}

View File

@@ -18,7 +18,12 @@ void rt_hw_gtimer_local_enable(void);
void rt_hw_gtimer_local_disable(void);
void rt_hw_gtimer_enable();
void rt_hw_gtimer_disable();
rt_inline void rt_hw_gtimer_disable(void)
{
__asm__ volatile ("msr CNTP_CTL_EL0, xzr":::"memory");
}
void rt_hw_set_gtimer_val(rt_uint64_t value);
rt_uint64_t rt_hw_get_gtimer_val();
rt_uint64_t rt_hw_get_cntpct_val();

View File

@@ -0,0 +1,16 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-02-24 GuEe-GUI first version
*/
#include <hypercall.h>
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size)
{
return rt_hw_hypercall(120, paddr & (~4095), (paddr & (~4095)) + size, (1 << 0) | (1 << 1) | (1 << 4), 0, 0, 0, 0);
}

View File

@@ -0,0 +1,27 @@
/*
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-02-24 GuEe-GUI first version
*/
#ifndef __HYPERCALL_H__
#define __HYPERCALL_H__
#include <rtdef.h>
rt_inline rt_uint32_t rt_hw_hypercall(rt_uint32_t w0, rt_uint64_t x1, rt_uint64_t x2,
rt_uint64_t x3, rt_uint64_t x4, rt_uint64_t x5, rt_uint64_t x6, rt_uint32_t w7)
{
register rt_uint64_t ret __asm__ ("x0");
__asm__ volatile ("hvc #0");
return (rt_uint32_t)ret;
}
rt_err_t rt_hv_stage2_map(unsigned long paddr, unsigned long size);
#endif
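Note that the inline assembly in rt_hw_hypercall carries no operand constraints, so the compiler is not obliged to have the arguments in x0-x7 when the hvc executes. A fully constrained variant would look like the sketch below (our reconstruction, not the committed code):

rt_inline rt_uint32_t hvc_call_sketch(rt_uint32_t w0, rt_uint64_t x1, rt_uint64_t x2,
    rt_uint64_t x3, rt_uint64_t x4, rt_uint64_t x5, rt_uint64_t x6, rt_uint32_t w7)
{
    /* pin every argument to the register the HVC convention expects */
    register rt_uint64_t r0 __asm__ ("x0") = w0;
    register rt_uint64_t r1 __asm__ ("x1") = x1;
    register rt_uint64_t r2 __asm__ ("x2") = x2;
    register rt_uint64_t r3 __asm__ ("x3") = x3;
    register rt_uint64_t r4 __asm__ ("x4") = x4;
    register rt_uint64_t r5 __asm__ ("x5") = x5;
    register rt_uint64_t r6 __asm__ ("x6") = x6;
    register rt_uint64_t r7 __asm__ ("x7") = w7;
    __asm__ volatile ("hvc #0"
                      : "+r" (r0)
                      : "r" (r1), "r" (r2), "r" (r3), "r" (r4),
                        "r" (r5), "r" (r6), "r" (r7)
                      : "memory");
    return (rt_uint32_t)r0; /* result comes back in x0 */
}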

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -14,23 +14,45 @@
#include "interrupt.h"
#include "gic.h"
#include "gicv3.h"
#include "armv8.h"
#include "mmu.h"
#include "cpuport.h"
/* exception and interrupt handler table */
struct rt_irq_desc isr_table[MAX_HANDLERS];
#ifndef RT_USING_SMP
/* Those variables will be accessed in ISR, so we need to share them. */
rt_ubase_t rt_interrupt_from_thread = 0;
rt_ubase_t rt_interrupt_to_thread = 0;
rt_ubase_t rt_thread_switch_interrupt_flag = 0;
#endif
extern int system_vectors;
#ifndef RT_CPUS_NR
#define RT_CPUS_NR 1
#endif
const unsigned int VECTOR_BASE = 0x00;
extern void rt_cpu_vector_set_base(void *addr);
extern void *system_vectors;
#ifdef RT_USING_SMP
#define rt_interrupt_nest rt_cpu_self()->irq_nest
#else
extern volatile rt_uint8_t rt_interrupt_nest;
#endif
#ifdef SOC_BCM283x
static void default_isr_handler(int vector, void *param)
{
#ifdef RT_USING_SMP
rt_kprintf("cpu %d unhandled irq: %d\n", rt_hw_cpu_id(),vector);
#else
rt_kprintf("unhandled irq: %d\n",vector);
#endif
}
#endif
void rt_hw_vector_init(void)
{
rt_hw_set_current_vbar((rt_ubase_t)&system_vectors);
rt_cpu_vector_set_base(&system_vectors);
}
/**
@@ -38,23 +60,69 @@ void rt_hw_vector_init(void)
*/
void rt_hw_interrupt_init(void)
{
#ifdef SOC_BCM283x
rt_uint32_t index;
/* initialize vector table */
rt_hw_vector_init();
/* initialize exceptions table */
rt_memset(isr_table, 0x00, sizeof(isr_table));
#ifndef BSP_USING_GIC
/* mask all of interrupts */
IRQ_DISABLE_BASIC = 0x000000ff;
IRQ_DISABLE1 = 0xffffffff;
IRQ_DISABLE2 = 0xffffffff;
for (index = 0; index < MAX_HANDLERS; index ++)
{
isr_table[index].handler = default_isr_handler;
isr_table[index].param = RT_NULL;
#ifdef RT_USING_INTERRUPT_INFO
rt_strncpy(isr_table[index].name, "unknown", RT_NAME_MAX);
isr_table[index].counter = 0;
#endif
}
/* init interrupt nest, and context in thread sp */
rt_interrupt_nest = 0;
rt_interrupt_from_thread = 0;
rt_interrupt_to_thread = 0;
rt_thread_switch_interrupt_flag = 0;
#else
/* initialize ARM GIC */
arm_gic_dist_init(0, platform_get_gic_dist_base(), GIC_IRQ_START);
arm_gic_cpu_init(0, platform_get_gic_cpu_base());
rt_uint64_t gic_cpu_base;
rt_uint64_t gic_dist_base;
#ifdef BSP_USING_GICV3
arm_gic_redist_init(0, platform_get_gic_redist_base());
rt_uint64_t gic_rdist_base;
#endif
rt_uint64_t gic_irq_start;
/* initialize vector table */
rt_hw_vector_init();
/* initialize exceptions table */
rt_memset(isr_table, 0x00, sizeof(isr_table));
/* initialize ARM GIC */
#ifdef RT_USING_SMART
gic_dist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_dist_base(), 0x2000, MMU_MAP_K_DEVICE);
gic_cpu_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_cpu_base(), 0x1000, MMU_MAP_K_DEVICE);
#ifdef BSP_USING_GICV3
gic_rdist_base = (rt_uint64_t)rt_hw_mmu_map(&mmu_info, 0, (void*)platform_get_gic_redist_base(),
RT_CPUS_NR * (2 << 16), MMU_MAP_K_DEVICE);
#endif
#else
gic_dist_base = platform_get_gic_dist_base();
gic_cpu_base = platform_get_gic_cpu_base();
#ifdef BSP_USING_GICV3
gic_rdist_base = platform_get_gic_redist_base();
#endif
#endif
gic_irq_start = GIC_IRQ_START;
arm_gic_dist_init(0, gic_dist_base, gic_irq_start);
arm_gic_cpu_init(0, gic_cpu_base);
#ifdef BSP_USING_GICV3
arm_gic_redist_init(0, gic_rdist_base);
#endif
#endif
}
@@ -65,7 +133,7 @@ void rt_hw_interrupt_init(void)
*/
void rt_hw_interrupt_mask(int vector)
{
#ifndef BSP_USING_GIC
#ifdef SOC_BCM283x
if (vector < 32)
{
IRQ_DISABLE1 = (1 << vector);
@@ -91,8 +159,8 @@ void rt_hw_interrupt_mask(int vector)
*/
void rt_hw_interrupt_umask(int vector)
{
#ifndef BSP_USING_GIC
if (vector < 32)
#ifdef SOC_BCM283x
if (vector < 32)
{
IRQ_ENABLE1 = (1 << vector);
}
@@ -117,7 +185,7 @@ void rt_hw_interrupt_umask(int vector)
*/
int rt_hw_interrupt_get_irq(void)
{
#ifdef BSP_USING_GIC
#ifndef SOC_BCM283x
return arm_gic_get_active_irq(0);
#else
return 0;
@@ -130,11 +198,12 @@ int rt_hw_interrupt_get_irq(void)
*/
void rt_hw_interrupt_ack(int vector)
{
#ifdef BSP_USING_GIC
#ifndef SOC_BCM283x
arm_gic_ack(0, vector);
#endif
}
#ifndef SOC_BCM283x
/**
* This function sets the interrupt's CPU targets.
* @param vector: the interrupt number
@@ -142,9 +211,7 @@ void rt_hw_interrupt_ack(int vector)
*/
void rt_hw_interrupt_set_target_cpus(int vector, unsigned int cpu_mask)
{
#ifdef BSP_USING_GIC
arm_gic_set_cpu(0, vector, cpu_mask);
#endif
}
/**
@@ -154,11 +221,7 @@ void rt_hw_interrupt_set_target_cpus(int vector, unsigned int cpu_mask)
*/
unsigned int rt_hw_interrupt_get_target_cpus(int vector)
{
#ifdef BSP_USING_GIC
return arm_gic_get_target_cpu(0, vector);
#else
return -RT_ERROR;
#endif
}
/**
@@ -168,9 +231,7 @@ unsigned int rt_hw_interrupt_get_target_cpus(int vector)
*/
void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode)
{
#ifdef BSP_USING_GIC
arm_gic_set_configuration(0, vector, mode);
#endif
}
/**
@@ -180,11 +241,7 @@ void rt_hw_interrupt_set_triger_mode(int vector, unsigned int mode)
*/
unsigned int rt_hw_interrupt_get_triger_mode(int vector)
{
#ifdef BSP_USING_GIC
return arm_gic_get_configuration(0, vector);
#else
return -RT_ERROR;
#endif
}
/**
@@ -193,9 +250,7 @@ unsigned int rt_hw_interrupt_get_triger_mode(int vector)
*/
void rt_hw_interrupt_set_pending(int vector)
{
#ifdef BSP_USING_GIC
arm_gic_set_pending_irq(0, vector);
#endif
}
/**
@@ -205,11 +260,7 @@ void rt_hw_interrupt_set_pending(int vector)
*/
unsigned int rt_hw_interrupt_get_pending(int vector)
{
#ifdef BSP_USING_GIC
return arm_gic_get_pending_irq(0, vector);
#else
return -RT_ERROR;
#endif
}
/**
@@ -218,9 +269,7 @@ unsigned int rt_hw_interrupt_get_pending(int vector)
*/
void rt_hw_interrupt_clear_pending(int vector)
{
#ifdef BSP_USING_GIC
arm_gic_clear_pending_irq(0, vector);
#endif
}
/**
@@ -230,9 +279,7 @@ void rt_hw_interrupt_clear_pending(int vector)
*/
void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
{
#ifdef BSP_USING_GIC
arm_gic_set_priority(0, vector, priority);
#endif
}
/**
@@ -242,11 +289,7 @@ void rt_hw_interrupt_set_priority(int vector, unsigned int priority)
*/
unsigned int rt_hw_interrupt_get_priority(int vector)
{
#ifdef BSP_USING_GIC
return arm_gic_get_priority(0, vector);
#else
return -RT_ERROR;
#endif
}
/**
@@ -255,9 +298,7 @@ unsigned int rt_hw_interrupt_get_priority(int vector)
*/
void rt_hw_interrupt_set_priority_mask(unsigned int priority)
{
#ifdef BSP_USING_GIC
arm_gic_set_interface_prior_mask(0, priority);
#endif
}
/**
@@ -267,11 +308,7 @@ void rt_hw_interrupt_set_priority_mask(unsigned int priority)
*/
unsigned int rt_hw_interrupt_get_priority_mask(void)
{
#ifdef BSP_USING_GIC
return arm_gic_get_interface_prior_mask(0);
#else
return -RT_ERROR;
#endif
}
/**
@@ -281,7 +318,6 @@ unsigned int rt_hw_interrupt_get_priority_mask(void)
*/
int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
{
#ifdef BSP_USING_GIC
int status;
if (bits < 8)
@@ -295,9 +331,6 @@ int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
}
return (status);
#else
return -RT_ERROR;
#endif
}
/**
@@ -307,16 +340,13 @@ int rt_hw_interrupt_set_prior_group_bits(unsigned int bits)
*/
unsigned int rt_hw_interrupt_get_prior_group_bits(void)
{
#ifdef BSP_USING_GIC
unsigned int bp;
bp = arm_gic_get_binary_point(0) & 0x07;
return (7 - bp);
#else
return -RT_ERROR;
#endif
}
#endif /* SOC_BCM283x */
/**
* This function will install an interrupt service routine for an interrupt.
@@ -349,26 +379,10 @@ rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
#ifdef RT_USING_SMP
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
#ifdef BSP_USING_GIC
#ifdef BSP_USING_GICV2
arm_gic_send_sgi(0, ipi_vector, cpu_mask, 0);
#else
arm_gic_send_affinity_sgi(0, ipi_vector, (rt_uint64_t *)&cpu_mask, GICV3_ROUTED_TO_SPEC);
#endif
#else
int i;
__DSB();
for (i = 0; i < RT_CPUS_NR; ++i)
{
if (cpu_mask & (1 << i))
{
IPI_MAILBOX_SET(i) = 1 << ipi_vector;
}
}
__DSB();
#elif defined(BSP_USING_GICV3)
arm_gic_send_affinity_sgi(0, ipi_vector, (unsigned int *)&cpu_mask, GICV3_ROUTED_TO_SPEC);
#endif
}
@@ -378,4 +392,3 @@ void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
}
#endif

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*

File diff suppressed because it is too large

View File

@@ -1,29 +1,23 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-11-28 GuEe-GUI the first version
* 2021-05-12 RT-Thread the first version
*/
#ifndef __MMU_H_
#define __MMU_H_
#include <rtthread.h>
/* normal memory wra mapping type */
#define NORMAL_MEM 0
#define NORMAL_MEM 0
/* normal nocache memory mapping type */
#define NORMAL_NOCACHE_MEM 1
#define NORMAL_NOCACHE_MEM 1
/* device mapping type */
#define DEVICE_MEM 2
#define MMU_MAP_ERROR_VANOTALIGN (-1)
#define MMU_MAP_ERROR_PANOTALIGN (-2)
#define MMU_MAP_ERROR_NOPAGE (-3)
#define MMU_MAP_ERROR_CONFLICT (-4)
#define DEVICE_MEM 2
struct mem_desc
{
@@ -33,42 +27,119 @@ struct mem_desc
unsigned long attr;
};
#define MMU_AF_SHIFT 10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT 6
#define MMU_MA_SHIFT 2
#define MMU_AF_SHIFT 10
#define MMU_SHARED_SHIFT 8
#define MMU_AP_SHIFT 6
#define MMU_MA_SHIFT 2
#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
#define MMU_AP_KAUN 0UL /* kernel r/w, user none */
#define MMU_AP_KAUA 1UL /* kernel r/w, user r/w */
#define MMU_AP_KRUN 2UL /* kernel r, user none */
#define MMU_AP_KRUR 3UL /* kernel r, user r */
#define MMU_MAP_CUSTOM(ap, mtype) \
(\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
((ap) << MMU_AP_SHIFT) |\
((mtype) << MMU_MA_SHIFT)\
)
#define MMU_MAP_K_RO MMU_MAP_CUSTOM(MMU_AP_KRUN, NORMAL_MEM)
#define MMU_MAP_K_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM)
#define MMU_MAP_K_RW MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_NOCACHE_MEM)
#define MMU_MAP_K_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUN, DEVICE_MEM)
#define MMU_MAP_U_RO MMU_MAP_CUSTOM(MMU_AP_KRUR, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_RWCB MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_MEM)
#define MMU_MAP_U_RW MMU_MAP_CUSTOM(MMU_AP_KAUA, NORMAL_NOCACHE_MEM)
#define MMU_MAP_U_DEVICE MMU_MAP_CUSTOM(MMU_AP_KAUA, DEVICE_MEM)
#define MMU_MAP_K_RO (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KRUN << MMU_AP_SHIFT) |\
(NORMAL_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_K_RWCB (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUN << MMU_AP_SHIFT) |\
(NORMAL_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_K_RW (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUN << MMU_AP_SHIFT) |\
(NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_K_DEVICE (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUN << MMU_AP_SHIFT) |\
(DEVICE_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_U_RO (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KRUR << MMU_AP_SHIFT) |\
(NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_U_RWCB (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUA << MMU_AP_SHIFT) |\
(NORMAL_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_U_RW (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUA << MMU_AP_SHIFT) |\
(NORMAL_NOCACHE_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_U_DEVICE (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
(MMU_AP_KAUA << MMU_AP_SHIFT) |\
(DEVICE_MEM << MMU_MA_SHIFT)\
)
#define MMU_MAP_CUSTOM(ap, mtype) (\
(0x1UL << MMU_AF_SHIFT) |\
(0x2UL << MMU_SHARED_SHIFT) |\
((ap) << MMU_AP_SHIFT) |\
((mtype) << MMU_MA_SHIFT)\
)
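These words are the lower attribute bits of a VMSAv8-64 stage-1 block/page descriptor: AF at bit 10, SH[1:0] at bits 9:8 (0b10 = outer shareable), AP[2:1] at bits 7:6, AttrIndx at bits 4:2. Expanding one value by hand (our arithmetic):

/* MMU_MAP_K_DEVICE
 *   = (0x1 << 10) | (0x2 << 8) | (MMU_AP_KAUN << 6) | (DEVICE_MEM << 2)
 *   =  0x400      |  0x200     |  0x000             |  0x008
 *   =  0x608  -> AF=1, outer shareable, kernel r/w user none, device memory */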
#define ARCH_SECTION_SHIFT 21
#define ARCH_SECTION_SIZE (1 << ARCH_SECTION_SHIFT)
#define ARCH_SECTION_MASK (ARCH_SECTION_SIZE - 1)
#define ARCH_PAGE_SHIFT 12
#define ARCH_PAGE_SIZE (1 << ARCH_PAGE_SHIFT)
#define ARCH_PAGE_MASK (ARCH_PAGE_SIZE - 1)
#define ARCH_PAGE_TBL_SHIFT 12
#define ARCH_PAGE_TBL_SIZE (1 << ARCH_PAGE_TBL_SHIFT)
#define ARCH_PAGE_TBL_MASK (ARCH_PAGE_TBL_SIZE - 1)
#define ARCH_ADDRESS_WIDTH_BITS 64
#define MMU_MAP_ERROR_VANOTALIGN -1
#define MMU_MAP_ERROR_PANOTALIGN -2
#define MMU_MAP_ERROR_NOPAGE -3
#define MMU_MAP_ERROR_CONFLICT -4
typedef struct
{
size_t *vtable;
size_t vstart;
size_t vend;
size_t pv_off;
} rt_mmu_info;
void rt_hw_mmu_setup_early(unsigned long *tbl0, unsigned long *tbl1, unsigned long size, unsigned long pv_off);
void rt_hw_mmu_setup(struct mem_desc *mdesc, int desc_nr);
int rt_hw_mmu_map_init(rt_mmu_info *mmu_info, void* v_address, size_t size, size_t *vtable, size_t pv_off);
int rt_hw_mmu_ioremap_init(rt_mmu_info *mmu_info, void* v_address, size_t size);
#ifdef RT_USING_SMART
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void *v_addr, void* p_addr, size_t size, size_t attr);
void *rt_hw_mmu_map_auto(rt_mmu_info *mmu_info, void *v_addr, size_t size, size_t attr);
#else
void *rt_hw_mmu_map(rt_mmu_info *mmu_info, void* p_addr, size_t size, size_t attr);
#endif
void rt_hw_mmu_unmap(rt_mmu_info *mmu_info, void* v_addr, size_t size);
void *rt_hw_mmu_v2p(rt_mmu_info *mmu_info, void* v_addr);
void rt_hw_mmu_ktbl_set(unsigned long tbl);
void *rt_hw_mmu_tbl_get();
void rt_hw_mmu_switch(void *mmu_table);
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_size_t desc_nr);
void rt_hw_mmu_init(void);
int rt_hw_mmu_map(unsigned long addr, unsigned long size, unsigned long attr);
void rt_hw_dcache_flush_all(void);
void rt_hw_dcache_invalidate_all(void);
void rt_hw_dcache_flush_range(unsigned long start_addr, unsigned long size);
void rt_hw_dcache_invalidate_range(unsigned long start_addr,unsigned long size);
extern rt_mmu_info mmu_info;
void rt_hw_icache_invalidate_all();
void rt_hw_icache_invalidate_range(unsigned long start_addr, int size);
#endif /* __MMU_H_ */
#endif

View File

@@ -1,103 +1,288 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
*/
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "cpu.h"
#include "psci.h"
#include "psci_api.h"
#include "smccc.h"
#include <psci.h>
#include <smccc.h>
#include <armv8.h>
#define DBG_TAG "libcpu.aarch64.psci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
typedef uint64_t (*psci_call_handle)(uint32_t fn, uint64_t arg0, uint64_t arg1, uint64_t arg2);
/** template for creating 4 PSCI ops: SUSPEND, OFF, ON, MIGRATE */
#define COMMON_PSCI_OPS_TEMPLATE(VER, SUSPEND, OFF, ON, MIGRATE) \
static int psci_##VER##_cpu_suspend(uint32_t state, unsigned long entry_point) \
{ \
return psci_call((SUSPEND), state, entry_point, 0); \
} \
static int psci_##VER##_cpu_off(uint32_t state) \
{ \
return psci_call((OFF), state, 0, 0); \
} \
static int psci_##VER##_cpu_on(unsigned long cpuid, unsigned long entry_point) \
{ \
return psci_call((ON), cpuid, entry_point, 0); \
} \
static int psci_##VER##_migrate(unsigned long cpuid) \
{ \
return psci_call((MIGRATE), cpuid, 0, 0); \
}
static uint64_t psci_smc_call(uint32_t fn, uint64_t arg0, uint64_t arg1, uint64_t arg2)
#ifdef RT_USING_FDT
#include "dtb_node.h"
struct psci_ops_t psci_ops;
#if __SIZE_WIDTH__ == 64
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN64_##name
#else
#define PSCI_FN_NATIVE(version, name) PSCI_##version##_FN_##name
#endif
/**
* SMCCC can use either the smc or the hvc method;
* smccc_call is initialized to the proper interface when psci_init() runs
*/
static void (*smccc_call)(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
struct arm_smccc_quirk_t *quirk);
static rt_uint32_t psci_call(unsigned long a0, unsigned long a1, unsigned long a2, unsigned long a3)
{
return arm_smc_call(fn, arg0, arg1, arg2, 0, 0, 0, 0).x0;
struct arm_smccc_res_t res;
smccc_call(a0, a1, a2, a3, 0, 0, 0, 0, &res, (void *)0);
return res.a0;
}
static uint64_t psci_hvc_call(uint32_t fn, uint64_t arg0, uint64_t arg1, uint64_t arg2)
static int _psci_probe_version(char *version, int *major, int *minor);
static int _psci_init_with_version(int major, int minor);
static struct dtb_node *psci_node;
static int psci_ver_major;
static int psci_ver_minor;
/**
* @brief initialize the PSCI operations.
* Uses the device tree to probe the PSCI version and conduit method,
* then sets up psci_ops for later use.
*
* @return int 0 on success
*/
int psci_init()
{
return arm_hvc_call(fn, arg0, arg1, arg2, 0, 0, 0, 0).x0;
}
static psci_call_handle psci_call = psci_smc_call;
static uint64_t shutdown_args[3] = {0, 0, 0};
static uint64_t reboot_args[3] = {0, 0, 0};
void arm_psci_init(uint64_t method, uint64_t *platform_shutdown_args, uint64_t *platform_reboot_args)
{
switch (method)
void *root = get_dtb_node_head();
psci_node = dtb_node_get_dtb_node_by_path(root, "/psci");
if (!psci_node)
{
case PSCI_METHOD_SMC:
psci_call = psci_smc_call;
break;
case PSCI_METHOD_HVC:
if (rt_hw_get_current_el() < 2)
LOG_E("No PSCI node found");
return -1;
}
char *compatible = dtb_node_get_dtb_node_property_value(psci_node, "compatible", NULL);
char *method = dtb_node_get_dtb_node_property_value(psci_node, "method", NULL);
int retval = 0;
// setup psci-method
if (!strcmp("hvc", method))
{
smccc_call = arm_smccc_hvc;
}
else if (!strcmp("smc", method))
{
smccc_call = arm_smccc_smc;
}
else
{
LOG_E("Unknown PSCI method: %s", method);
return -1;
}
LOG_D("Using psci method %s", method);
retval = _psci_probe_version(compatible, &psci_ver_major, &psci_ver_minor);
if (retval != 0)
return retval;
// init psci_ops with specified psci version
retval = _psci_init_with_version(psci_ver_major, psci_ver_minor);
return retval;
}
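/*
 * A minimal usage sketch (assumption, not part of this file): once
 * psci_init() succeeds, the probed callbacks are reached through the
 * global psci_ops table; secondary_entry is a hypothetical symbol.
 */
#if 0 /* sketch only */
extern void secondary_entry(void);

static void bring_up_cpu1_sketch(void)
{
    if (psci_init() == 0 && psci_ops.cpu_on)
    {
        /* arg0: MPIDR affinity of the target core, arg1: entry point */
        psci_ops.cpu_on(1, (unsigned long)secondary_entry);
    }
}
#endif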
/* PSCI v0.1 function IDs must be probed from the FDT; they are implementation-defined values */
static rt_uint32_t cpu_suspend_0_1;
static rt_uint32_t cpu_off_0_1;
static rt_uint32_t cpu_on_0_1;
static rt_uint32_t migrate_0_1;
/* basic operations TEMPLATE for API since 0.1 version */
COMMON_PSCI_OPS_TEMPLATE(0_1, cpu_suspend_0_1, cpu_off_0_1, cpu_on_0_1, migrate_0_1);
/* used for v0.1 only; relies on the FDT to probe the function IDs */
#define PROBE_AND_SET(FUNC_NAME) \
do \
{ \
int num_of_elem; \
funcid = \
dtb_node_get_dtb_node_property_value(psci_node, #FUNC_NAME, &num_of_elem); \
if (num_of_elem != 4 || funcid == 0 || *funcid == 0) \
{ \
LOG_E("Failed to probe " #FUNC_NAME " in FDT"); \
} \
else \
{ \
FUNC_NAME##_0_1 = (rt_uint32_t)fdt32_to_cpu(*funcid); \
psci_ops.FUNC_NAME = psci_0_1_##FUNC_NAME; \
} \
} while (0)
static int psci_0_1_init()
{
// reading function id from fdt
rt_uint32_t *funcid;
PROBE_AND_SET(cpu_suspend);
PROBE_AND_SET(cpu_off);
PROBE_AND_SET(cpu_on);
PROBE_AND_SET(migrate);
return 0;
}
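/*
 * For reference (illustrative values only): the shape of a v0.1 FDT node
 * carrying the implementation-defined function ids that PROBE_AND_SET
 * reads, next to the "compatible" and "method" strings psci_init() uses:
 *
 *   psci {
 *       compatible = "arm,psci";
 *       method     = "smc";
 *       cpu_on     = <0x84000003>;
 *       cpu_off    = <0x84000002>;
 *   };
 */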
COMMON_PSCI_OPS_TEMPLATE(0_2, PSCI_FN_NATIVE(0_2, CPU_SUSPEND), PSCI_0_2_FN_CPU_OFF, PSCI_FN_NATIVE(0_2, CPU_ON), PSCI_FN_NATIVE(0_2, MIGRATE));
static rt_uint32_t psci_0_2_get_version(void)
{
return psci_call(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
static void psci_0_2_set_basic_ops()
{
psci_ops = (struct psci_ops_t){
.get_version = psci_0_2_get_version,
// the following APIs are v0.1 compatible
.cpu_suspend = psci_0_2_cpu_suspend,
.cpu_off = psci_0_2_cpu_off,
.cpu_on = psci_0_2_cpu_on,
.migrate = psci_0_2_migrate,
};
}
static void psci_0_2_system_off(void)
{
psci_call(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
}
static void psci_0_2_system_reset(void)
{
psci_call(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
}
static int psci_0_2_init()
{
psci_0_2_set_basic_ops();
// TODO init other version 0.2 features...
// PSCI system off and reset, which control the machine power state
psci_ops.system_off = psci_0_2_system_off;
psci_ops.system_reset = psci_0_2_system_reset;
system_off = psci_0_2_system_off;
return 0;
}
/* PSCI v1.0 & after */
static int psci_1_0_features(uint32_t psci_func_id)
{
return psci_call(PSCI_1_0_FN_PSCI_FEATURES,
psci_func_id, 0, 0);
}
static int psci_1_0_init()
{
psci_0_2_init();
// TODO init other version 1.0 features...
// remove unsupported features
if (psci_1_0_features(PSCI_0_2_FN_SYSTEM_OFF) == PSCI_RET_NOT_SUPPORTED)
{
psci_ops.system_off = RT_NULL;
system_off = RT_NULL;
}
else
LOG_D("Using SYSTEM OFF feature");
if (psci_1_0_features(PSCI_0_2_FN_SYSTEM_RESET) == PSCI_RET_NOT_SUPPORTED)
psci_ops.system_reset = RT_NULL;
else
LOG_D("Using SYSTEM RESET feature");
return 0;
}
/* probe psci version from fdt or SMC call */
static int _psci_probe_version(char *version, int *major, int *minor)
{
int retval = 0;
// the bare compatible string "arm,psci" indicates PSCI v0.1
if (!strcmp(version, "arm,psci"))
{
*major = 0;
*minor = 1;
}
else if (!strncmp(version, "arm,psci-", 9)) /* match the full "arm,psci-" prefix (9 chars) */
{
// since PSCI v0.2, the version can be probed with a PSCI call
rt_uint32_t ret = psci_0_2_get_version();
*major = PSCI_VERSION_MAJOR(ret);
*minor = PSCI_VERSION_MINOR(ret);
}
else
{
LOG_E("[%s] was not a proper PSCI version", version);
retval = -1;
}
LOG_D("Using PSCI v%d.%d", *major, *minor);
return retval;
}
/* init psci ops with version info */
static int _psci_init_with_version(int major, int minor)
{
int retval = -0xbeef; // mark unsupported
if (major == 0)
{
// for v0.1, the PSCI function IDs are provided by the FDT
if (minor == 1)
{
psci_call = psci_hvc_call;
retval = psci_0_1_init();
}
else if (minor == 2)
{
retval = psci_0_2_init();
}
break;
}
if (platform_shutdown_args != RT_NULL)
else if (major == 1)
{
shutdown_args[0] = platform_shutdown_args[0];
shutdown_args[1] = platform_shutdown_args[1];
shutdown_args[2] = platform_shutdown_args[2];
// psci_1_0_init is the base setup for v1.0 and later versions
retval = psci_1_0_init();
}
if (platform_reboot_args != RT_NULL)
if (retval == -0xbeef)
{
reboot_args[0] = platform_reboot_args[0];
reboot_args[1] = platform_reboot_args[1];
reboot_args[2] = platform_reboot_args[2];
LOG_E("PSCI init with incompatible version %d.%d", major, minor);
}
return retval;
}
uint32_t arm_psci_get_version()
{
return (uint32_t)psci_call(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
uint32_t arm_psci_get_affinity_info(uint64_t target_affinity, uint64_t lowest_affinity_level)
{
return (uint32_t)psci_call(PSCI_0_2_FN_AFFINITY_INFO, target_affinity, lowest_affinity_level, 0);
}
uint32_t arm_psci_get_feature(uint32_t psci_func_id)
{
return (uint32_t)psci_call(PSCI_1_0_FN_PSCI_FEATURES, psci_func_id, 0, 0);
}
uint32_t arm_psci_cpu_off(uint64_t state)
{
return (uint32_t)psci_call(PSCI_0_2_FN_CPU_OFF, state, 0, 0);
}
uint32_t arm_psci_cpu_on(uint64_t mpid, uint64_t entry)
{
/* bits [63:40] and [31:24] must be zero; the rest hold aff3, aff2, aff1, aff0 */
mpid = mpid & 0xff00ffffff;
return (uint32_t)psci_call(PSCI_0_2_FN_CPU_ON, mpid, entry, 0);
}
uint32_t arm_psci_cpu_suspend(uint32_t power_state, uint64_t entry)
{
return (uint32_t)psci_call(PSCI_0_2_FN_CPU_SUSPEND, power_state, entry, 0);
}
void arm_psci_system_off()
{
psci_call(PSCI_0_2_FN_SYSTEM_OFF, shutdown_args[0], shutdown_args[1], shutdown_args[2]);
}
void arm_psci_system_reboot()
{
psci_call(PSCI_0_2_FN_SYSTEM_RESET, reboot_args[0], reboot_args[1], reboot_args[2]);
}
#endif /* RT_USING_FDT */

View File

@@ -1,34 +1,25 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
*/
#ifndef __PSCI_H__
#define __PSCI_H__
#include <stdint.h>
/*
* Non-Confidential PSCI 1.0 release (30 January 2015), with errata fixes for PSCI 0.2; PSCI 0.1 is not supported
/**
* PSCI protocol content
* For PSCI v0.1, only the return values below are defined by the protocol
*/
#define PSCI_VER_0_2 0x00000002
#define PSCI_METHOD_SMC 3
#define PSCI_METHOD_HVC 2
/* PSCI 0.2 interface */
/* PSCI v0.2 interface */
#define PSCI_0_2_FN_BASE 0x84000000
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
#define PSCI_0_2_FN_END 0x8400001F
#define PSCI_0_2_FN64_BASE 0xC4000000
#define PSCI_0_2_64BIT 0x40000000
#define PSCI_0_2_FN64_BASE (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
#define PSCI_0_2_FN64_END 0xC400001F
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1)
@@ -44,90 +35,69 @@
#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1)
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5)
#define PSCI_0_2_FN64_MIGRATE (5)
#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7)
/* PSCI 1.0 interface */
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
#define PSCI_1_0_FN_CPU_FREEZE PSCI_0_2_FN(11)
#define PSCI_1_0_FN_CPU_DEFAULT_SUSPEND PSCI_0_2_FN(12)
#define PSCI_1_0_FN_NODE_HW_STATE PSCI_0_2_FN(13)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
#define PSCI_1_0_FN_STAT_RESIDENCY PSCI_0_2_FN(16)
#define PSCI_1_0_FN_STAT_COUNT PSCI_0_2_FN(17)
#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
#define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12)
#define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
#define PSCI_1_0_FN64_STAT_RESIDENCY PSCI_0_2_FN64(16)
#define PSCI_1_0_FN64_STAT_COUNT PSCI_0_2_FN64(17)
#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
/* 1KB stack per core */
#define PSCI_STACK_SHIFT 10
#define PSCI_STACK_SIZE (1 << PSCI_STACK_SHIFT)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
#define PSCI_0_2_POWER_STATE_TYPE_MASK (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
#define PSCI_0_2_POWER_STATE_AFFL_MASK (0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
/* PSCI affinity level state returned by AFFINITY_INFO */
#define PSCI_AFFINITY_LEVEL_ON 0
#define PSCI_AFFINITY_LEVEL_OFF 1
#define PSCI_AFFINITY_LEVEL_ON_PENDING 2
/* PSCI extended power state encoding for CPU_SUSPEND function */
#define PSCI_1_0_EXT_POWER_STATE_ID_MASK 0xfffffff
#define PSCI_1_0_EXT_POWER_STATE_ID_SHIFT 0
#define PSCI_1_0_EXT_POWER_STATE_TYPE_SHIFT 30
#define PSCI_1_0_EXT_POWER_STATE_TYPE_MASK (0x1 << PSCI_1_0_EXT_POWER_STATE_TYPE_SHIFT)
/*
* PSCI power state
* power_level:
* Level 0: cores
* Level 1: clusters
* Level 2: system
* state_type:
* value 0: standby or retention state
* value 1: powerdown state (entry and context_id are valid)
* state_id:
* StateID
*/
#define PSCI_POWER_STATE(power_level, state_type, state_id) \
( \
((power_level) << 24) | \
((state_type) << 16) | \
((state_id) << 24) \
)
/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */
#define PSCI_0_2_AFFINITY_LEVEL_ON 0
#define PSCI_0_2_AFFINITY_LEVEL_OFF 1
#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2
/*
* For system, cluster, core
* 0: run
* 1: standby(only core)
* 2: retention
* 3: powerdown
*/
#define PSCI_POWER_STATE_ID(state_id_power_level, system, cluster, core) \
( \
((state_id_power_level) << 12) | \
((system) << 8) | \
((cluster) << 4) | \
(core) \
)
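/*
 * Worked example (illustration only), using the field layout documented
 * above: a cluster-level powerdown request with power_level = 1 (cluster),
 * state_type = 1 (powerdown), core and cluster powered down, system left
 * running:
 *
 *   PSCI_POWER_STATE(1, 1, PSCI_POWER_STATE_ID(1, 0, 3, 3))
 *     = (1 << 24) | (1 << 16) | ((1 << 12) | (0 << 8) | (3 << 4) | 3)
 *     = 0x01011033
 */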
/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */
#define PSCI_0_2_TOS_UP_MIGRATE 0
#define PSCI_0_2_TOS_UP_NO_MIGRATE 1
#define PSCI_0_2_TOS_MP 2
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED (-1)
#define PSCI_RET_INVALID_PARAMETERS (-2)
#define PSCI_RET_DENIED (-3)
#define PSCI_RET_ALREADY_ON (-4)
#define PSCI_RET_ON_PENDING (-5)
#define PSCI_RET_INTERNAL_FAILURE (-6)
#define PSCI_RET_NOT_PRESENT (-7)
#define PSCI_RET_DISABLED (-8)
#define PSCI_RET_INVALID_ADDRESS (-9)
/* PSCI version decoding (independent of PSCI version) */
#define PSCI_VERSION_MAJOR_SHIFT 16
#define PSCI_VERSION_MINOR_MASK ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
#define PSCI_VERSION_MAJOR(ver) (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
#define PSCI_VERSION_MINOR(ver) ((ver) & PSCI_VERSION_MINOR_MASK)
#define PSCI_VERSION(maj, min) \
((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
((min) & PSCI_VERSION_MINOR_MASK))
void arm_psci_init(uint64_t method, uint64_t *platform_shutdown_args, uint64_t *platform_reboot_args);
/* PSCI features decoding (>=1.0) */
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK (0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
uint32_t arm_psci_get_version();
uint32_t arm_psci_get_affinity_info(uint64_t target_affinity, uint64_t lowest_affinity_level);
uint32_t arm_psci_get_feature(uint32_t psci_func_id);
#define PSCI_1_0_OS_INITIATED BIT(0)
#define PSCI_1_0_SUSPEND_MODE_PC 0
#define PSCI_1_0_SUSPEND_MODE_OSI 1
uint32_t arm_psci_cpu_off(uint64_t state);
uint32_t arm_psci_cpu_on(uint64_t mpid, uint64_t entry);
uint32_t arm_psci_cpu_suspend(uint32_t power_state, uint64_t entry);
void arm_psci_system_off();
void arm_psci_system_reboot();
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
#define PSCI_RET_INVALID_PARAMS -2
#define PSCI_RET_DENIED -3
#define PSCI_RET_ALREADY_ON -4
#define PSCI_RET_ON_PENDING -5
#define PSCI_RET_INTERNAL_FAILURE -6
#define PSCI_RET_NOT_PRESENT -7
#define PSCI_RET_DISABLED -8
#define PSCI_RET_INVALID_ADDRESS -9
#endif /*__PSCI_H__*/

View File

@@ -0,0 +1,34 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
#ifndef __PSCI_API_H__
#define __PSCI_API_H__
#include <rthw.h>
#include <rtthread.h>
#include <stdint.h>
#include "psci_api.h"
/** generic psci ops supported v0.1 v0.2 v1.0 v1.1 */
struct psci_ops_t
{
uint32_t (*get_version)(void);
int32_t (*cpu_suspend)(uint32_t state, unsigned long entry_point);
int32_t (*cpu_off)(uint32_t state);
int32_t (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
int32_t (*migrate)(unsigned long cpuid);
void (*system_off)(void);
void (*system_reset)(void);
};
extern struct psci_ops_t psci_ops;
extern int psci_init(void);
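/*
 * Usage sketch (assumption): system_off/system_reset may be left RT_NULL
 * when the firmware does not implement them (see the PSCI_FEATURES probe
 * in psci.c), so callers should guard:
 *
 *   if (psci_ops.system_reset)
 *       psci_ops.system_reset();
 */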
#endif // __PSCI_API_H__

View File

@@ -1,37 +1,32 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
*/
/*
* smc calling convention call
/**
* SMCCC v0.2
* ARM DEN0028E chapter 2.6
*/
.macro SMCCC_CALL INS
stp x8, x29, [sp,#-16]! /* push the frame pointer (x29) for the purposes of AAPCS64 compatibility */
\INS #0
ldp x8, x29, [sp], #16
stp x0, x1, [x8]
stp x2, x3, [x8, #16]
str x6, [x8, #32]
.macro SMCCC instr
stp x29, x30, [sp, #-16]!
mov x29, sp
\instr #0
// store in arm_smccc_res
ldr x4, [sp, #16]
stp x0, x1, [x4, #0]
stp x2, x3, [x4, #16]
1:
ldp x29, x30, [sp], #16
ret
.endm
/*
* smc call
*/
.globl arm_smc_call
arm_smc_call:
SMCCC_CALL smc
.global arm_smccc_smc
arm_smccc_smc:
SMCCC smc
/*
* hvc call
*/
.globl arm_hvc_call
arm_hvc_call:
SMCCC_CALL hvc
.global arm_smccc_hvc
arm_smccc_hvc:
SMCCC hvc

View File

@@ -1,33 +1,45 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2019, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2021-09-09 GuEe-GUI The first version
*/
#ifndef __SMCCC_H__
#define __SMCCC_H__
#include <stdint.h>
/*
* The ARM SMCCC v1.0 calling convention provides the following guarantees about registers:
* Register Modified Return State
* X0...X3 Yes Result values
* X4...X17 Yes Unpredictable
* X18...X30 No Preserved
* SP_EL0 No Preserved
* SP_ELx No Preserved
/**
* result from SMC/HVC call
* ARM DEN0028E chapter 5
*/
struct arm_smccc_ret
typedef struct arm_smccc_res_t
{
unsigned long a0;
// reserved for ARM SMC and HVC Fast Call services
unsigned long a1;
unsigned long a2;
unsigned long a3;
} arm_smccc_res_t;
/**
* quirk is a structure that contains vendor-specific information;
* currently it is just a placeholder
*/
struct arm_smccc_quirk_t
{
uint64_t x0; /* Parameter registers */
uint64_t x1; /* Parameter registers */
uint64_t x2; /* Parameter registers */
uint64_t x3; /* Parameter registers */
uint64_t x6; /* Parameter register: Optional Session ID register */
};
struct arm_smccc_ret arm_smc_call(uint32_t w0, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6, uint32_t w7);
struct arm_smccc_ret arm_hvc_call(uint32_t w0, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6, uint32_t w7);
/* smccc version 0.2 */
void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
struct arm_smccc_quirk_t *quirk);
void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
unsigned long a3, unsigned long a4, unsigned long a5,
unsigned long a6, unsigned long a7, struct arm_smccc_res_t *res,
struct arm_smccc_quirk_t *quirk);
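/*
 * A minimal call sketch (illustration only), assuming the smc conduit and
 * the PSCI_0_2_FN_PSCI_VERSION function id (0x84000000) from psci.h:
 */
#if 0 /* sketch only */
static unsigned long psci_version_sketch(void)
{
    struct arm_smccc_res_t res;

    arm_smccc_smc(0x84000000UL, 0, 0, 0, 0, 0, 0, 0, &res, (void *)0);
    return res.a0; /* encoded major/minor version */
}
#endif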
#endif /* __SMCCC_H__ */

View File

@@ -5,14 +5,15 @@
*
* Change Logs:
* Date Author Notes
* 2011-09-23 Bernard the first version
* 2011-10-05 Bernard add thumb mode
* 2021-11-04 GuEe-GUI set sp with SP_ELx
* 2021-12-28 GuEe-GUI add fpu support
* 2021-05-12 RT-Thread init
*/
#include <board.h>
#include <rtthread.h>
#include <armv8.h>
#define INITIAL_SPSR_EL1 (PSTATE_EL1 | SP_ELx)
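/*
 * Worked value (from armv8.h): PSTATE_EL1 is 0x04 and SP_ELx is 0x01, so
 * INITIAL_SPSR_EL1 == 0x05, i.e. PSTATE.M = EL1h: the thread starts at
 * EL1 on SP_EL1 with the DAIF bits clear, so interrupts are enabled.
 */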
/**
* This function will initialize thread stack
*
@@ -23,87 +24,84 @@
*
* @return stack address
*/
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter, rt_uint8_t *stack_addr, void *texit)
rt_uint8_t *rt_hw_stack_init(void *tentry, void *parameter,
rt_uint8_t *stack_addr, void *texit)
{
static const rt_ubase_t initial_spsr[] =
{
[1] = PSTATE_EL1 | SP_ELx,
[2] = PSTATE_EL2 | SP_ELx,
[3] = PSTATE_EL3 | SP_ELx
};
/* The AAPCS64 requires 128-bit (16 byte) stack alignment */
rt_ubase_t *stk = (rt_ubase_t*)RT_ALIGN_DOWN((rt_ubase_t)stack_addr, 16);
rt_ubase_t *stk;
*(--stk) = (rt_ubase_t) 0; /* Q0 */
*(--stk) = (rt_ubase_t) 0; /* Q0 */
*(--stk) = (rt_ubase_t) 0; /* Q1 */
*(--stk) = (rt_ubase_t) 0; /* Q1 */
*(--stk) = (rt_ubase_t) 0; /* Q2 */
*(--stk) = (rt_ubase_t) 0; /* Q2 */
*(--stk) = (rt_ubase_t) 0; /* Q3 */
*(--stk) = (rt_ubase_t) 0; /* Q3 */
*(--stk) = (rt_ubase_t) 0; /* Q4 */
*(--stk) = (rt_ubase_t) 0; /* Q4 */
*(--stk) = (rt_ubase_t) 0; /* Q5 */
*(--stk) = (rt_ubase_t) 0; /* Q5 */
*(--stk) = (rt_ubase_t) 0; /* Q6 */
*(--stk) = (rt_ubase_t) 0; /* Q6 */
*(--stk) = (rt_ubase_t) 0; /* Q7 */
*(--stk) = (rt_ubase_t) 0; /* Q7 */
*(--stk) = (rt_ubase_t) 0; /* Q8 */
*(--stk) = (rt_ubase_t) 0; /* Q8 */
*(--stk) = (rt_ubase_t) 0; /* Q9 */
*(--stk) = (rt_ubase_t) 0; /* Q9 */
*(--stk) = (rt_ubase_t) 0; /* Q10 */
*(--stk) = (rt_ubase_t) 0; /* Q10 */
*(--stk) = (rt_ubase_t) 0; /* Q11 */
*(--stk) = (rt_ubase_t) 0; /* Q11 */
*(--stk) = (rt_ubase_t) 0; /* Q12 */
*(--stk) = (rt_ubase_t) 0; /* Q12 */
*(--stk) = (rt_ubase_t) 0; /* Q13 */
*(--stk) = (rt_ubase_t) 0; /* Q13 */
*(--stk) = (rt_ubase_t) 0; /* Q14 */
*(--stk) = (rt_ubase_t) 0; /* Q14 */
*(--stk) = (rt_ubase_t) 0; /* Q15 */
*(--stk) = (rt_ubase_t) 0; /* Q15 */
stk = (rt_ubase_t *)stack_addr;
*(--stk) = ( rt_ubase_t ) 11; /* X1 */
*(--stk) = ( rt_ubase_t ) parameter; /* X0 */
*(--stk) = ( rt_ubase_t ) 33; /* X3 */
*(--stk) = ( rt_ubase_t ) 22; /* X2 */
*(--stk) = ( rt_ubase_t ) 55; /* X5 */
*(--stk) = ( rt_ubase_t ) 44; /* X4 */
*(--stk) = ( rt_ubase_t ) 77; /* X7 */
*(--stk) = ( rt_ubase_t ) 66; /* X6 */
*(--stk) = ( rt_ubase_t ) 99; /* X9 */
*(--stk) = ( rt_ubase_t ) 88; /* X8 */
*(--stk) = ( rt_ubase_t ) 11; /* X11 */
*(--stk) = ( rt_ubase_t ) 10; /* X10 */
*(--stk) = ( rt_ubase_t ) 13; /* X13 */
*(--stk) = ( rt_ubase_t ) 12; /* X12 */
*(--stk) = ( rt_ubase_t ) 15; /* X15 */
*(--stk) = ( rt_ubase_t ) 14; /* X14 */
*(--stk) = ( rt_ubase_t ) 17; /* X17 */
*(--stk) = ( rt_ubase_t ) 16; /* X16 */
*(--stk) = ( rt_ubase_t ) 19; /* X19 */
*(--stk) = ( rt_ubase_t ) 18; /* X18 */
*(--stk) = ( rt_ubase_t ) 21; /* X21 */
*(--stk) = ( rt_ubase_t ) 20; /* X20 */
*(--stk) = ( rt_ubase_t ) 23; /* X23 */
*(--stk) = ( rt_ubase_t ) 22; /* X22 */
*(--stk) = ( rt_ubase_t ) 25; /* X25 */
*(--stk) = ( rt_ubase_t ) 24; /* X24 */
*(--stk) = ( rt_ubase_t ) 27; /* X27 */
*(--stk) = ( rt_ubase_t ) 26; /* X26 */
*(--stk) = ( rt_ubase_t ) 29; /* X29 */
*(--stk) = ( rt_ubase_t ) 28; /* X28 */
*(--stk) = ( rt_ubase_t ) 0; /* FPSR */
*(--stk) = ( rt_ubase_t ) 0; /* FPCR */
*(--stk) = ( rt_ubase_t ) 0; /* XZR - has no effect, used so there are an even number of registers. */
*(--stk) = ( rt_ubase_t ) texit; /* X30 - procedure call link register. */
*(--stk) = (rt_ubase_t)0; /* Q0 */
*(--stk) = (rt_ubase_t)0; /* Q0 */
*(--stk) = (rt_ubase_t)0; /* Q1 */
*(--stk) = (rt_ubase_t)0; /* Q1 */
*(--stk) = (rt_ubase_t)0; /* Q2 */
*(--stk) = (rt_ubase_t)0; /* Q2 */
*(--stk) = (rt_ubase_t)0; /* Q3 */
*(--stk) = (rt_ubase_t)0; /* Q3 */
*(--stk) = (rt_ubase_t)0; /* Q4 */
*(--stk) = (rt_ubase_t)0; /* Q4 */
*(--stk) = (rt_ubase_t)0; /* Q5 */
*(--stk) = (rt_ubase_t)0; /* Q5 */
*(--stk) = (rt_ubase_t)0; /* Q6 */
*(--stk) = (rt_ubase_t)0; /* Q6 */
*(--stk) = (rt_ubase_t)0; /* Q7 */
*(--stk) = (rt_ubase_t)0; /* Q7 */
*(--stk) = (rt_ubase_t)0; /* Q8 */
*(--stk) = (rt_ubase_t)0; /* Q8 */
*(--stk) = (rt_ubase_t)0; /* Q9 */
*(--stk) = (rt_ubase_t)0; /* Q9 */
*(--stk) = (rt_ubase_t)0; /* Q10 */
*(--stk) = (rt_ubase_t)0; /* Q10 */
*(--stk) = (rt_ubase_t)0; /* Q11 */
*(--stk) = (rt_ubase_t)0; /* Q11 */
*(--stk) = (rt_ubase_t)0; /* Q12 */
*(--stk) = (rt_ubase_t)0; /* Q12 */
*(--stk) = (rt_ubase_t)0; /* Q13 */
*(--stk) = (rt_ubase_t)0; /* Q13 */
*(--stk) = (rt_ubase_t)0; /* Q14 */
*(--stk) = (rt_ubase_t)0; /* Q14 */
*(--stk) = (rt_ubase_t)0; /* Q15 */
*(--stk) = (rt_ubase_t)0; /* Q15 */
*(--stk) = initial_spsr[rt_hw_get_current_el()];
*(--stk) = ( rt_ubase_t ) tentry; /* Exception return address. */
*(--stk) = (rt_ubase_t)1; /* X1 */
*(--stk) = (rt_ubase_t)parameter; /* X0 */
*(--stk) = (rt_ubase_t)3; /* X3 */
*(--stk) = (rt_ubase_t)2; /* X2 */
*(--stk) = (rt_ubase_t)5; /* X5 */
*(--stk) = (rt_ubase_t)4; /* X4 */
*(--stk) = (rt_ubase_t)7; /* X7 */
*(--stk) = (rt_ubase_t)6; /* X6 */
*(--stk) = (rt_ubase_t)9; /* X9 */
*(--stk) = (rt_ubase_t)8; /* X8 */
*(--stk) = (rt_ubase_t)11; /* X11 */
*(--stk) = (rt_ubase_t)10; /* X10 */
*(--stk) = (rt_ubase_t)13; /* X13 */
*(--stk) = (rt_ubase_t)12; /* X12 */
*(--stk) = (rt_ubase_t)15; /* X15 */
*(--stk) = (rt_ubase_t)14; /* X14 */
*(--stk) = (rt_ubase_t)17; /* X17 */
*(--stk) = (rt_ubase_t)16; /* X16 */
*(--stk) = (rt_ubase_t)19; /* X19 */
*(--stk) = (rt_ubase_t)18; /* X18 */
*(--stk) = (rt_ubase_t)21; /* X21 */
*(--stk) = (rt_ubase_t)20; /* X20 */
*(--stk) = (rt_ubase_t)23; /* X23 */
*(--stk) = (rt_ubase_t)22; /* X22 */
*(--stk) = (rt_ubase_t)25; /* X25 */
*(--stk) = (rt_ubase_t)24; /* X24 */
*(--stk) = (rt_ubase_t)27; /* X27 */
*(--stk) = (rt_ubase_t)26; /* X26 */
*(--stk) = (rt_ubase_t)29; /* X29 */
*(--stk) = (rt_ubase_t)28; /* X28 */
*(--stk) = (rt_ubase_t)0; /* FPSR */
*(--stk) = (rt_ubase_t)0; /* FPCR */
*(--stk) = (rt_ubase_t)texit; /* X30 - procedure call link register. */
*(--stk) = (rt_ubase_t)0; /* sp_el0 */
*(--stk) = INITIAL_SPSR_EL1;
*(--stk) = (rt_ubase_t)tentry; /* Exception return address. */
/* return task's current stack address */
return (rt_uint8_t *)stk;
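/*
 * Note (for reference): the push sequence above lays the frame out
 * exactly as struct rt_hw_exp_stack in armv8.h, pc at the lowest
 * address and the Qn pairs at the top, so the returned pointer can be
 * restored by the same path that handles an exception frame.
 */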

View File

@@ -0,0 +1,18 @@
/*
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
*/
.global Reset_Handler
.section ".start", "ax"
Reset_Handler:
nop
.text
.weak SVC_Handler
SVC_Handler:
ret

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2006-2021, RT-Thread Development Team
* Copyright (c) 2006-2018, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -13,12 +13,66 @@
#include <board.h>
#include <armv8.h>
#include <interrupt.h>
#include "interrupt.h"
#include <backtrace.h>
void rt_unwind(struct rt_hw_exp_stack *regs, int pc_adj)
{
}
#ifdef RT_USING_FINSH
extern long list_thread(void);
#endif
#ifdef RT_USING_LWP
#include <lwp.h>
#include <lwp_arch.h>
#ifdef LWP_USING_CORE_DUMP
#include <lwp_core_dump.h>
#endif
void sys_exit(int value);
void check_user_fault(struct rt_hw_exp_stack *regs, uint32_t pc_adj, char *info)
{
uint32_t mode = regs->cpsr;
if ((mode & 0x1f) == 0x00)
{
rt_kprintf("%s! pc = 0x%08x\n", info, regs->pc - pc_adj);
#ifdef LWP_USING_CORE_DUMP
lwp_core_dump(regs, pc_adj);
#endif
sys_exit(-1);
}
}
int check_user_stack(unsigned long esr, struct rt_hw_exp_stack *regs)
{
unsigned char ec;
void *dfar;
int ret = 0;
ec = (unsigned char)((esr >> 26) & 0x3fU);
switch (ec)
{
case 0x20: /* instruction abort from a lower EL */
case 0x21: /* instruction abort from the current EL */
case 0x24: /* data abort from a lower EL */
asm volatile("mrs %0, far_el1":"=r"(dfar));
if (arch_expand_user_stack(dfar))
{
ret = 1;
}
break;
default:
break;
}
return ret;
}
#endif
/**
* this function will show registers of CPU
*
@@ -35,84 +89,42 @@ void rt_hw_show_register(struct rt_hw_exp_stack *regs)
rt_kprintf("X20:0x%16.16p X21:0x%16.16p X22:0x%16.16p X23:0x%16.16p\n", (void *)regs->x20, (void *)regs->x21, (void *)regs->x22, (void *)regs->x23);
rt_kprintf("X24:0x%16.16p X25:0x%16.16p X26:0x%16.16p X27:0x%16.16p\n", (void *)regs->x24, (void *)regs->x25, (void *)regs->x26, (void *)regs->x27);
rt_kprintf("X28:0x%16.16p X29:0x%16.16p X30:0x%16.16p\n", (void *)regs->x28, (void *)regs->x29, (void *)regs->x30);
rt_kprintf("SPSR :0x%16.16p\n", (void *)regs->spsr);
rt_kprintf("SP_EL0:0x%16.16p\n", (void *)regs->sp_el0);
rt_kprintf("SPSR :0x%16.16p\n", (void *)regs->cpsr);
rt_kprintf("EPC :0x%16.16p\n", (void *)regs->pc);
}
/**
* When the CPU comes across an instruction which it cannot handle,
* it takes the undefined instruction trap.
*
* @param regs system registers
*
* @note never invoke this function in application
*/
void rt_hw_trap_error(struct rt_hw_exp_stack *regs)
{
rt_kprintf("error exception:\n");
rt_hw_show_register(regs);
#ifdef RT_USING_FINSH
list_thread();
#endif
rt_hw_cpu_shutdown();
}
void rt_hw_trap_irq(void)
{
#ifndef BSP_USING_GIC
#ifdef SOC_BCM283x
extern rt_uint8_t core_timer_flag;
void *param;
uint32_t irq;
rt_isr_handler_t isr_func;
extern struct rt_irq_desc isr_table[];
uint32_t value = IRQ_PEND_BASIC & 0x3ff;
uint32_t value = 0;
value = IRQ_PEND_BASIC & 0x3ff;
#ifdef RT_USING_SMP
uint32_t cpu_id = rt_hw_cpu_id();
uint32_t mailbox_data = IPI_MAILBOX_CLEAR(cpu_id);
#else
uint32_t cpu_id = 0;
#endif
uint32_t int_source = CORE_IRQSOURCE(cpu_id) & 0x3ff;
if (int_source & 0x02)
if (core_timer_flag != 0)
{
isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
isr_table[IRQ_ARM_TIMER].counter++;
#endif
if (isr_func)
uint32_t cpu_id = rt_hw_cpu_id();
uint32_t int_source = CORE_IRQSOURCE(cpu_id);
if (int_source & 0x0f)
{
param = isr_table[IRQ_ARM_TIMER].param;
isr_func(IRQ_ARM_TIMER, param);
}
return;
}
#ifdef RT_USING_SMP
if (int_source & 0xf0)
{
/* it's an IPI interrupt */
if (mailbox_data & 0x1)
{
/* clear mailbox */
IPI_MAILBOX_CLEAR(cpu_id) = mailbox_data;
isr_func = isr_table[IRQ_ARM_MAILBOX].handler;
#ifdef RT_USING_INTERRUPT_INFO
isr_table[IRQ_ARM_MAILBOX].counter++;
#endif
if (isr_func)
if (int_source & 0x08)
{
param = isr_table[IRQ_ARM_MAILBOX].param;
isr_func(IRQ_ARM_MAILBOX, param);
isr_func = isr_table[IRQ_ARM_TIMER].handler;
#ifdef RT_USING_INTERRUPT_INFO
isr_table[IRQ_ARM_TIMER].counter++;
#endif
if (isr_func)
{
param = isr_table[IRQ_ARM_TIMER].param;
isr_func(IRQ_ARM_TIMER, param);
}
}
}
else
{
CORE_MAILBOX3_CLEAR(cpu_id) = mailbox_data;
}
return;
}
#endif /* RT_USING_SMP */
/* local interrupt */
if (value)
@@ -184,19 +196,74 @@ void rt_hw_trap_irq(void)
void rt_hw_trap_fiq(void)
{
void *param;
int ir;
int ir, ir_self;
rt_isr_handler_t isr_func;
extern struct rt_irq_desc isr_table[];
ir = rt_hw_interrupt_get_irq();
/* bits 10~12 are the CPU ID, bits 0~9 are the interrupt ID */
ir_self = ir & 0x3ffUL;
/* get interrupt service routine */
isr_func = isr_table[ir].handler;
param = isr_table[ir].param;
isr_func = isr_table[ir_self].handler;
param = isr_table[ir_self].param;
/* turn to interrupt service routine */
isr_func(ir, param);
isr_func(ir_self, param);
/* end of interrupt */
rt_hw_interrupt_ack(ir);
}
void process_exception(unsigned long esr, unsigned long epc);
void SVC_Handler(struct rt_hw_exp_stack *regs);
void rt_hw_trap_exception(struct rt_hw_exp_stack *regs)
{
unsigned long esr;
unsigned char ec;
asm volatile("mrs %0, esr_el1":"=r"(esr));
ec = (unsigned char)((esr >> 26) & 0x3fU);
#ifdef RT_USING_LWP
if (dbg_check_event(regs, esr))
{
return;
}
else
#endif
if (ec == 0x15) /* is it an AArch64 (64-bit) syscall? */
{
SVC_Handler(regs);
/* never return here */
}
#ifdef RT_USING_LWP
if (check_user_stack(esr, regs))
{
return;
}
#endif
process_exception(esr, regs->pc);
rt_hw_show_register(regs);
rt_kprintf("current: %s\n", rt_thread_self()->name);
#ifdef RT_USING_LWP
check_user_fault(regs, 0, "user fault");
#endif
#ifdef RT_USING_FINSH
list_thread();
#endif
backtrace((unsigned long)regs->pc, (unsigned long)regs->x30, (unsigned long)regs->x29);
rt_hw_cpu_shutdown();
}
void rt_hw_trap_serror(struct rt_hw_exp_stack *regs)
{
rt_kprintf("SError\n");
rt_hw_show_register(regs);
rt_kprintf("current: %s\n", rt_thread_self()->name);
#ifdef RT_USING_FINSH
list_thread();
#endif
rt_hw_cpu_shutdown();
}

View File

@@ -1,44 +1,60 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2018-10-06 ZhaoXiaowei the first version
* 2022-02-16 GuEe-GUI replace vectors entry to macro
* 2018-10-06 ZhaoXiaowei the first version
*/
.macro ventry label
.align 7
b \label
.endm
.text
.globl system_vectors
.globl vector_error
.globl vector_exception
.globl vector_irq
.globl vector_fiq
.align 11
system_vectors:
/* Exception from CurrentEL (EL1t) with SP_EL0 (SPSEL = 0) */
ventry vector_error /* Synchronous */
ventry vector_irq /* IRQ/vIRQ */
ventry vector_fiq /* FIQ/vFIQ */
ventry vector_error /* SError/vSError */
/* Exception from CurrentEL (EL1h) with SP_ELn */
ventry vector_error /* Synchronous */
ventry vector_irq /* IRQ/vIRQ */
ventry vector_fiq /* FIQ/vFIQ */
ventry vector_error /* SError/vSError */
system_vectors:
.align 11
.set VBAR, system_vectors
.org VBAR
/* Exception from CurrentEL (EL1) with SP_EL0 (SPSEL = 0) */
.org (VBAR + 0x00 + 0)
B vector_serror /* Synchronous */
.org (VBAR + 0x80 + 0)
B vector_serror /* IRQ/vIRQ */
.org (VBAR + 0x100 + 0)
B vector_serror /* FIQ/vFIQ */
.org (VBAR + 0x180 + 0)
B vector_serror /* Error/vError */
/* Exception from CurrentEL (EL1) with SP_ELn */
.org (VBAR + 0x200 + 0)
B vector_exception /* Synchronous */
.org (VBAR + 0x280 + 0)
B vector_irq /* IRQ/vIRQ */
.org (VBAR + 0x300 + 0)
B vector_fiq /* FIQ/vFIQ */
.org (VBAR + 0x380 + 0)
B vector_serror
/* Exception from lower EL, aarch64 */
ventry vector_error /* Synchronous */
ventry vector_error /* IRQ/vIRQ */
ventry vector_error /* FIQ/vFIQ */
ventry vector_error /* SError/vSError */
.org (VBAR + 0x400 + 0)
B vector_exception
.org (VBAR + 0x480 + 0)
B vector_irq
.org (VBAR + 0x500 + 0)
B vector_fiq
.org (VBAR + 0x580 + 0)
B vector_serror
/* Exception from lower EL, aarch32 */
ventry vector_error /* Synchronous */
ventry vector_error /* IRQ/vIRQ */
ventry vector_error /* FIQ/vFIQ */
ventry vector_error /* SError/vSError */
.org (VBAR + 0x600 + 0)
B vector_serror
.org (VBAR + 0x680 + 0)
B vector_serror
.org (VBAR + 0x700 + 0)
B vector_serror
.org (VBAR + 0x780 + 0)
B vector_serror
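/*
 * Sketch (assumption, not part of this file): the table only takes effect
 * once its 2KB-aligned address (.align 11) is written to VBAR_EL1, e.g.
 * from C with inline assembly:
 *
 *   extern char system_vectors[];
 *   __asm__ volatile ("msr vbar_el1, %0; isb" : : "r"(system_vectors));
 */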

View File

@@ -1,182 +1,291 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
* Copyright (c) 2006-2020, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Date Author Notes
* 2020-01-15 bigmagic the first version
* 2020-08-10 SummerGift support clang compiler
* 2021-11-04 GuEe-GUI set sp with SP_ELx
* 2021-12-28 GuEe-GUI add smp support
*/
#include "rtconfig.h"
#define SECONDARY_STACK_SIZE 4096
.section ".text.entrypoint","ax"
.global __start
.globl _start
.globl secondary_cpu_start
_start:
#ifdef RT_USING_SMP
mrs x1, mpidr_el1
adr x4, .boot_cpu_mpidr
str x1, [x4]
dsb sy
__start:
#ifdef ARCH_ARM_BOOTWITH_FLUSH_CACHE
bl __asm_flush_dcache_all
#endif
bl rt_hw_cpu_id_set
/* read cpu id, stop slave cores */
mrs x0, tpidr_el1
cbz x0, .L__cpu_0 /* the .L prefix marks a local label in ELF */
bl __asm_flush_dcache_all /* the kernel text and data must be flushed to DDR */
/* cpu id > 0, stop */
/* cpu id == 0 will also goto here after returned from entry() if possible */
.L__current_cpu_idle:
wfe
b .L__current_cpu_idle
secondary_cpu_start:
#ifdef RT_USING_SMP
adr x4, .boot_cpu_mpidr
ldr x4, [x4]
dsb sy
/* Read cpu mpidr_el1 */
mrs x1, mpidr_el1
.L__cpu_0:
/* set the stack below our code; define the stack pointer for the current exception level */
adr x1, __start
/* Read cpu id */
ldr x0, =rt_cpu_mpidr_early /* the BSP must define the `rt_cpu_mpidr_early' table in SMP mode */
mov x2, #0
cpu_id_confirm:
add x2, x2, #1 /* Next cpu id inc */
ldr x3, [x0], #8
dsb sy
cmp x3, #0
beq cpu_idle /* means the end of the `rt_cpu_mpidr_early' table */
cmp x3, x1
bne cpu_id_confirm
/* Get cpu id success */
sub x0, x2, #1
msr tpidr_el1, x0 /* Save cpu id global */
cmp x3, x4 /* If it is boot cpu */
beq boot_cpu_setup
/* Set current cpu's stack top */
sub x0, x0, #1
mov x1, #SECONDARY_STACK_SIZE
adr x2, .secondary_cpu_stack_top
msub x1, x0, x1, x2
b cpu_check_el
#else
msr tpidr_el1, xzr
#endif /* RT_USING_SMP */
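/*
 * C sketch of the cpu_id_confirm loop above (illustration only): walk the
 * BSP's rt_cpu_mpidr_early table until a zero terminator or a matching
 * MPIDR; the matching index becomes the logical cpu id, and each
 * secondary core then takes a SECONDARY_STACK_SIZE slice below
 * .secondary_cpu_stack_top (msub computes top - (id - 1) * size).
 *
 *   static int find_cpu_id_sketch(const unsigned long *table,
 *                                 unsigned long mpidr)
 *   {
 *       for (int i = 0; table[i] != 0; i++)
 *       {
 *           if (table[i] == mpidr)
 *               return i;
 *       }
 *       return -1; // not listed: the core parks in cpu_idle
 *   }
 */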
boot_cpu_setup:
ldr x1, =_start
cpu_check_el:
/* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
and x0, x0, #12 /* Clear reserved bits */
and x0, x0, #12 /* clear reserved bits */
/* Running at EL3? */
cmp x0, #12 /* EL3 value is 0b1100 */
bne cpu_not_in_el3
/* running at EL3? */
cmp x0, #12 /* 1100b. So, EL3 */
bne .L__not_in_el3 /* 11? !EL3 -> 5: */
/* Should never be executed, just for completeness. (EL3) */
mov x2, #(1 << 0) /* EL0 and EL1 are in Non-Secure state */
orr x2, x2, #(1 << 4) /* RES1 */
orr x2, x2, #(1 << 5) /* RES1 */
bic x2, x2, #(1 << 7) /* SMC instructions are enabled at EL1 and above */
orr x2, x2, #(1 << 8) /* HVC instructions are enabled at EL1 and above */
orr x2, x2, #(1 << 10) /* The next lower level is AArch64 */
msr scr_el3, x2
mov x2, #9 /* Next level is 0b1001->EL2h */
orr x2, x2, #(1 << 6) /* Mask FIQ */
orr x2, x2, #(1 << 7) /* Mask IRQ */
orr x2, x2, #(1 << 8) /* Mask SError */
orr x2, x2, #(1 << 9) /* Mask Debug Exception */
msr spsr_el3, x2
adr x2, cpu_in_el2
/* should never be executed, just for completeness. (EL3) */
mov x2, #0x5b1
msr scr_el3, x2 /* SCR_ELn Secure Configuration Register */
mov x2, #0x3c9
msr spsr_el3, x2 /* SPSR_ELn. Saved Program Status Register. 1111001001 */
adr x2, .L__not_in_el3
msr elr_el3, x2
eret
eret /* Exception Return: from EL3, continue from .L__not_in_el3 */
cpu_not_in_el3: /* Running at EL2 or EL1 */
cmp x0, #4 /* EL1 = 0100 */
beq cpu_in_el1
.L__not_in_el3: /* running at EL2 or EL1 */
cmp x0, #4 /* 0x04 0100 EL1 */
beq .L__in_el1 /* EL1 -> 5: */
cpu_in_el2:
/* Enable CNTP for EL1 */
mrs x0, hcr_el2
bic x0, x0, #0xff
msr hcr_el2, x0
msr sp_el1, x1 /* in EL2, set sp of EL1 to _start */
/* enable CNTP for EL1 */
mrs x0, cnthctl_el2 /* Counter-timer Hypervisor Control register */
orr x0, x0, #3
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
mov x0, #(1 << 31) /* Enable AArch64 in EL1 */
/* enable AArch64 in EL1 */
mov x0, #(1 << 31) /* AArch64 */
orr x0, x0, #(1 << 1) /* SWIO hardwired on Pi3 */
msr hcr_el2, x0
mrs x0, hcr_el2
mov x2, #5 /* Next level is 0b0101->EL1h */
orr x2, x2, #(1 << 6) /* Mask FIQ */
orr x2, x2, #(1 << 7) /* Mask IRQ */
orr x2, x2, #(1 << 8) /* Mask SError */
orr x2, x2, #(1 << 9) /* Mask Debug Exception */
msr spsr_el2, x2
adr x2, cpu_in_el1
/* change execution level to EL1 */
mov x2, #0x3c4
msr spsr_el2, x2 /* 1111000100 */
adr x2, .L__in_el1
msr elr_el2, x2
eret
cpu_in_el1:
msr spsel, #1
mov sp, x1 /* Set sp in el1 */
eret /* exception return. from EL2. continue from .L__in_el1 */
/* Avoid traps from SIMD or floating-point instructions */
.L__in_el1:
#ifdef RT_USING_LWP
ldr x9, =PV_OFFSET
#else
mov x9, #0
#endif
mov sp, x1 /* in EL1. Set sp to _start */
/* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid traps from SIMD or floating-point instructions */
mov x1, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
msr cpacr_el1, x1
mrs x1, sctlr_el1
orr x1, x1, #(1 << 12) /* Enable Instruction */
bic x1, x1, #(3 << 3) /* Disable SP Alignment check */
bic x1, x1, #(1 << 1) /* Disable Alignment check */
msr sctlr_el1, x1
/* clear bss */
ldr x1, =__bss_start /* get bss start address */
ldr x2, =__bss_end
sub x2, x2, x1 /* get bss size */
add x1, x1, x9
#ifdef RT_USING_SMP
ldr x1, =_start
cmp sp, x1
bne secondary_cpu_c_start
#endif /* RT_USING_SMP */
and x3, x2, #7 /* x3 = size % 8 */
ldr x4, =~0x7
and x2, x2, x4 /* mask ~7 */
ldr x0, =__bss_start
ldr x1, =__bss_end
sub x2, x1, x0
mov x3, x1
cmp x2, #7
bls clean_bss_check
.L__clean_bss_loop:
cbz x2, .L__clean_bss_loop_1
str xzr, [x1], #8
sub x2, x2, #8
b .L__clean_bss_loop
clean_bss_loop_quad:
str xzr, [x0], #8
sub x2, x3, x0
cmp x2, #7
bhi clean_bss_loop_quad
cmp x1, x0
bls jump_to_entry
.L__clean_bss_loop_1:
cbz x3, .L__jump_to_entry
strb wzr, [x1], #1
sub x3, x3, #1
b .L__clean_bss_loop_1
clean_bss_loop_byte:
strb wzr, [x0], #1
.L__jump_to_entry: /* jump to C code, should not return */
bl mmu_tcr_init
clean_bss_check:
cmp x1, x0
bhi clean_bss_loop_byte
adr x1, __start
ldr x0, =~0x1fffff
and x0, x1, x0
add x1, x0, #0x1000
jump_to_entry:
b rtthread_startup
msr ttbr0_el1, x0
msr ttbr1_el1, x1
dsb sy
cpu_idle:
wfe
b cpu_idle
#ifdef RT_USING_SMP
.align 3
.boot_cpu_mpidr:
.quad 0x0
.align 12
.secondary_cpu_stack:
.space (SECONDARY_STACK_SIZE * (RT_CPUS_NR - 1))
.secondary_cpu_stack_top:
ldr x2, =0x40000000 /* map 1G memory for kernel space */
#ifdef RT_USING_LWP
ldr x3, =PV_OFFSET
#endif
bl rt_hw_mmu_setup_early
ldr x30, =after_mmu_enable /* set LR to after_mmu_enable function, it's a v_addr */
mrs x1, sctlr_el1
bic x1, x1, #(3 << 3) /* dis SA, SA0 */
bic x1, x1, #(1 << 1) /* dis A */
orr x1, x1, #(1 << 12) /* I */
orr x1, x1, #(1 << 2) /* C */
orr x1, x1, #(1 << 0) /* M */
msr sctlr_el1, x1 /* enable MMU */
dsb sy
isb sy
ic ialluis /* Invalidate all instruction caches in Inner Shareable domain to Point of Unification */
dsb sy
isb sy
tlbi vmalle1 /* Invalidate all stage 1 translations used at EL1 with the current VMID */
dsb sy
isb sy
ret
after_mmu_enable:
#ifdef RT_USING_LWP
mrs x0, tcr_el1 /* disable ttbr0, only using kernel space */
orr x0, x0, #(1 << 7)
msr tcr_el1, x0
msr ttbr0_el1, xzr
dsb sy
#endif
mov x0, #1
msr spsel, x0
adr x1, __start
mov sp, x1 /* sp_el1 set to _start */
b rtthread_startup
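/*
 * For reference (illustrative summary, not part of this file): the
 * SCTLR_EL1 bits toggled by the enable sequence above are
 *
 *   M  (1 << 0)   MMU enable
 *   A  (1 << 1)   alignment check (cleared here)
 *   C  (1 << 2)   data cache enable
 *   SA (3 << 3)   SP alignment checks SA/SA0 (cleared here)
 *   I  (1 << 12)  instruction cache enable
 *
 * followed by dsb/isb barriers, an ic ialluis instruction-cache
 * invalidate and a tlbi vmalle1 TLB invalidate before returning into
 * the virtual address space via the preloaded x30.
 */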
#ifdef RT_USING_SMP
/**
* secondary cpu
*/
.globl _secondary_cpu_entry
_secondary_cpu_entry:
bl rt_hw_cpu_id_set
adr x1, __start
/* set up EL1 */
mrs x0, CurrentEL /* CurrentEL Register. bit 2, 3. Others reserved */
and x0, x0, #12 /* clear reserved bits */
/* running at EL3? */
cmp x0, #12 /* 1100b. So, EL3 */
bne .L__not_in_el3_cpux /* 11? !EL3 -> 5: */
/* should never be executed, just for completeness. (EL3) */
mov x2, #0x5b1
msr scr_el3, x2 /* SCR_ELn Secure Configuration Register */
mov x2, #0x3c9
msr spsr_el3, x2 /* SPSR_ELn. Saved Program Status Register. 1111001001 */
adr x2, .L__not_in_el3_cpux
msr elr_el3, x2
eret /* Exception Return: from EL3, continue from .L__not_in_el3 */
.L__not_in_el3_cpux: /* running at EL2 or EL1 */
cmp x0, #4 /* 0x04 0100 EL1 */
beq .L__in_el1_cpux /* EL1 -> 5: */
mrs x0, hcr_el2
bic x0, x0, #0xff
msr hcr_el2, x0
msr sp_el1, x1 /* in EL2, set sp of EL1 to _start */
/* enable CNTP for EL1 */
mrs x0, cnthctl_el2 /* Counter-timer Hypervisor Control register */
orr x0, x0, #3
msr cnthctl_el2, x0
msr cntvoff_el2, xzr
/* enable AArch64 in EL1 */
mov x0, #(1 << 31) /* AArch64 */
orr x0, x0, #(1 << 1) /* SWIO hardwired on Pi3 */
msr hcr_el2, x0
mrs x0, hcr_el2
/* change execution level to EL1 */
mov x2, #0x3c4
msr spsr_el2, x2 /* 1111000100 */
adr x2, .L__in_el1_cpux
msr elr_el2, x2
eret /* exception return. from EL2. continue from .L__in_el1 */
.L__in_el1_cpux:
adr x19, .L__in_el1_cpux
ldr x8, =.L__in_el1_cpux
sub x19, x19, x8 /* get PV_OFFSET */
mrs x0, tpidr_el1
/* each cpu init stack is 8k */
sub x1, x1, x0, lsl #13
mov sp, x1 /* in EL1. Set sp to _start */
/* Set CPACR_EL1 (Architecture Feature Access Control Register) to avoid traps from SIMD or floating-point instructions */
mov x1, #0x00300000 /* Don't trap any SIMD/FP instructions in both EL0 and EL1 */
msr cpacr_el1, x1
.L__jump_to_entry_cpux: /* jump to C code, should not return */
/* init mmu early */
bl mmu_tcr_init
adr x1, __start
ldr x0, =~0x1fffff
and x0, x1, x0
add x1, x0, #0x1000
msr ttbr0_el1, x0
msr ttbr1_el1, x1
dsb sy
ldr x30, =after_mmu_enable_cpux /* set LR to after_mmu_enable function, it's a v_addr */
mrs x1, sctlr_el1
bic x1, x1, #(3 << 3) /* dis SA, SA0 */
bic x1, x1, #(1 << 1) /* dis A */
orr x1, x1, #(1 << 12) /* I */
orr x1, x1, #(1 << 2) /* C */
orr x1, x1, #(1 << 0) /* M */
msr sctlr_el1, x1 /* enable MMU */
dsb sy
isb sy
ic ialluis /* Invalidate all instruction caches in Inner Shareable domain to Point of Unification */
dsb sy
isb sy
tlbi vmalle1 /* Invalidate all stage 1 translations used at EL1 with the current VMID */
dsb sy
isb sy
ret
after_mmu_enable_cpux:
mrs x0, tcr_el1 /* disable ttbr0, only using kernel space */
orr x0, x0, #(1 << 7)
msr tcr_el1, x0
msr ttbr0_el1, xzr
dsb sy
mov x0, #1
msr spsel, x0
mrs x0, tpidr_el1
/* each cpu init stack is 8k */
adr x1, __start
sub x1, x1, x0, lsl #13
mov sp, x1 /* in EL1. Set sp to _start */
b rt_hw_secondary_cpu_bsp_start
#endif