Misc Cortex-A5 changes include new file for cache operations

This commit is contained in:
Gregory Nutt
2013-07-20 13:06:00 -06:00
parent b26d5c7164
commit 66259bfc53
14 changed files with 961 additions and 183 deletions
+3
View File
@@ -5141,3 +5141,6 @@
the SAMA5D3x-EK board(s) in particular. There is very little here on
the first check-in, this structure is being used now primarily to
create the Cortex-A5 support (2013-7-19).
* arch/arm/src/armv7-a/arm_cache.S: Cortex-A5 cache operations
(2013-7-20).
+5 -5
View File
@@ -58,15 +58,15 @@
#define PSR_MODE_SHIFT (1) /* Bits 0-4: Mode fields */
#define PSR_MODE_MASK (31 << PSR_MODE_SHIFT)
# define PSR_MODE_USER (16 << PSR_MODE_SHIFT) /* User mode */
# define PSR_MODE_USR (16 << PSR_MODE_SHIFT) /* User mode */
# define PSR_MODE_FIQ (17 << PSR_MODE_SHIFT) /* FIQ mode */
# define PSR_MODE_IRQ (18 << PSR_MODE_SHIFT) /* IRQ mode */
# define PSR_MODE_SUPER (19 << PSR_MODE_SHIFT) /* Supervisor mode */
# define PSR_MODE_SVC (19 << PSR_MODE_SHIFT) /* Supervisor mode */
# define PSR_MODE_MON (22 << PSR_MODE_SHIFT) /* Monitor mode */
# define PSR_MODE_ABORT (23 << PSR_MODE_SHIFT) /* Abort mode */
# define PSR_MODE_ABT (23 << PSR_MODE_SHIFT) /* Abort mode */
# define PSR_MODE_HYP (26 << PSR_MODE_SHIFT) /* Hyp mode */
# define PSR_MODE_UNDEF (27 << PSR_MODE_SHIFT) /* Undefined mode */
# define PSR_MODE_SYSTEM (31 << PSR_MODE_SHIFT) /* System mode */
# define PSR_MODE_UND (27 << PSR_MODE_SHIFT) /* Undefined mode */
# define PSR_MODE_SYS (31 << PSR_MODE_SHIFT) /* System mode */
#define PSR_T_BIT (1 << 5) /* Bit 5: Thumb execution state bit */
#define PSR_MASK_SHIFT (6) /* Bits 6-8: Mask Bits */
#define PSR_MASK_MASK (7 << PSR_GE_SHIFT)
+318
View File
@@ -0,0 +1,318 @@
/****************************************************************************
* arch/arm/src/armv7-a/arm_cache.S
*
* Copyright (C) 2013 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* References:
*
* "Cortex-A5 MPCore, Technical Reference Manual", Revision: r0p1,
* Copyright © 2010 ARM. All rights reserved. ARM DDI 0434B (ID101810)
* "ARM® Architecture Reference Manual, ARMv7-A and ARMv7-R edition",
* Copyright © 1996-1998, 2000, 2004-2012 ARM. All rights reserved. ARM
* DDI 0406C.b (ID072512)
*
* Portions of this file derive from Atmel sample code for the SAMA5D3 Cortex-A5
* which also has a modified BSD-style license:
*
* Copyright (c) 2012, Atmel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor Atmel nor the names of the contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************/
/* cp15_cache Cache Operations
*
* Usage
*
* They are performed as MCR instructions and only operate on a level 1 cache
* associated with ARM v7 processor.
*
* The supported operations are:
*
* 1. Any of these operations can be applied to any data cache or any
* unified cache.
* 2. Invalidate by MVA. Performs an invalidate of a data or unified cache
* line based on the address it contains.
* 3. Invalidate by set/way. Performs an invalidate of a data or unified
* cache line based on its location in the cache hierarchy.
* 4. Clean by MVA. Performs a clean of a data or unified cache line based
* on the address it contains.
* 5. Clean by set/way. Performs a clean of a data or unified cache line
* based on its location in the cache hierarchy.
* 6. Clean and Invalidate by MVA. Performs a clean and invalidate of a
* data or unified cache line based on the address it contains.
* 7. Clean and Invalidate by set/way. Performs a clean and invalidate of
* a data or unified cache line based on its location in the cache
* hierarchy.
*
* NOTE: Many of these operations are implemented as assembly language
* macros or as C inline functions in the file cache.h. The larger functions
* are implemented here as C-callable functions.
*/
/****************************************************************************
* Included Files
****************************************************************************/
.file "arm_cache.S"
/****************************************************************************
* Preprocessor Definitions
****************************************************************************/
/****************************************************************************
* Public Symbols
****************************************************************************/
.globl cp15_coherent_dcache_for_dma
.globl cp15_invalidate_dcache_for_dma
.globl cp15_clean_dcache_for_dma
.globl cp15_flush_dcache_for_dma
.globl cp15_flush_kern_dcache_for_dma
/****************************************************************************
* Public Functions
****************************************************************************/
.text
/****************************************************************************
* Name: cp15_coherent_dcache_for_dma
*
* Description:
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
* and will be executed.
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region
*
* Returned Value:
* None
*
****************************************************************************/
.globl cp15_coherent_dcache_for_dma
.type cp15_coherent_dcache_for_dma, function
/* Make the I and D caches coherent over [r0=start, r1=end): clean each
 * D-cache line to the point of unification, then invalidate the matching
 * I-cache lines and the branch predictor, per the ARM ARM (DDI 0406C)
 * CP15 c7 maintenance encodings cited in the file header.
 */
cp15_coherent_dcache_for_dma:
mrc p15, 0, r3, c0, c0, 1 /* Read the Cache Type Register (CTR) */
lsr r3, r3, #16 /* Move DminLine (bits 16-19) to bits 0-3 */
and r3, r3, #0xf /* r3 = DminLine = log2(words per line) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = smallest D-cache line size in bytes */
sub r3, r2, #1 /* r3 = line size mask */
bic r12, r0, r3 /* r12 = start rounded down to a line boundary */
1:
mcr p15, 0, r12, c7, c11, 1 /* DCCMVAU: clean D line by MVA to PoU */
add r12, r12, r2 /* Advance to the next line */
cmp r12, r1 /* Loop until the end address is reached */
blo 1b
dsb /* Cleans must complete before the invalidates */
mrc p15, 0, r3, c0, c0, 1 /* Re-read the CTR */
and r3, r3, #0xf /* r3 = IminLine (bits 0-3, no shift needed) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = smallest I-cache line size in bytes */
sub r3, r2, #1 /* r3 = line size mask */
bic r12, r0, r3 /* r12 = aligned start address */
2:
mcr p15, 0, r12, c7, c5, 1 /* ICIMVAU: invalidate I line by MVA to PoU */
add r12, r12, r2
cmp r12, r1
blo 2b
mov r0, #0
mcr p15, 0, r0, c7, c1, 6 /* BPIALLIS: invalidate branch predictor (IS) */
mcr p15, 0, r0, c7, c5, 6 /* BPIALL: invalidate all branch predictors */
dsb /* Ensure all maintenance has completed */
isb /* Flush the pipeline: refetch with new state */
bx lr
.size cp15_coherent_dcache_for_dma, . - cp15_coherent_dcache_for_dma
/****************************************************************************
* Name: cp15_invalidate_dcache_for_dma
*
* Description:
* Invalidate the data cache within the specified region; we will be
* performing a DMA operation in this region and we want to purge old data
* in the cache.
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region
*
* Returned Value:
* None
*
****************************************************************************/
.globl cp15_invalidate_dcache_for_dma
.type cp15_invalidate_dcache_for_dma, function
/* Invalidate the D-cache over [r0=start, r1=end).  Partial lines at
 * either boundary are cleaned+invalidated instead of just invalidated so
 * that unrelated data sharing those lines is not lost.
 */
cp15_invalidate_dcache_for_dma:
mrc p15, 0, r3, c0, c0, 1 /* Read the Cache Type Register (CTR) */
lsr r3, r3, #16 /* Move DminLine (bits 16-19) to bits 0-3 */
and r3, r3, #0xf /* r3 = DminLine = log2(words per line) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = D-cache line size in bytes */
sub r3, r2, #1 /* r3 = line size mask */
tst r0, r3 /* Is the start address line-aligned? */
bic r0, r0, r3 /* r0 = aligned start address */
mcrne p15, 0, r0, c7, c14, 1 /* If not, DCCIMVAC the partial first line */
tst r1, r3 /* Is the end address line-aligned? */
bic r1, r1, r3 /* r1 = aligned end address */
mcrne p15, 0, r1, c7, c14, 1 /* If not, DCCIMVAC the partial last line */
3:
mcr p15, 0, r0, c7, c6, 1 /* DCIMVAC: invalidate D line by MVA to PoC */
add r0, r0, r2 /* Advance to the next line */
cmp r0, r1 /* Loop until the end address is reached */
blo 3b
dsb /* Ensure completion before DMA proceeds */
bx lr
/* FIX: the .size directive previously named cp15_coherent_dcache_for_dma
 * (copy-paste error), leaving this symbol with no size and re-sizing the
 * wrong one.
 */
.size cp15_invalidate_dcache_for_dma, . - cp15_invalidate_dcache_for_dma
/****************************************************************************
* Name: cp15_clean_dcache_for_dma
*
* Description:
* Clean the data cache within the specified region
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region
*
* Returned Value:
* None
*
****************************************************************************/
.globl cp15_clean_dcache_for_dma
.type cp15_clean_dcache_for_dma, function
/* Clean (write back) the D-cache over [r0=start, r1=end) so that main
 * memory holds current data before an outgoing DMA.
 */
cp15_clean_dcache_for_dma:
mrc p15, 0, r3, c0, c0, 1 /* Read the Cache Type Register (CTR) */
lsr r3, r3, #16 /* Move DminLine (bits 16-19) to bits 0-3 */
and r3, r3, #0xf /* r3 = DminLine = log2(words per line) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = D-cache line size in bytes */
sub r3, r2, #1 /* r3 = line size mask */
bic r0, r0, r3 /* Align the start address to a line boundary */
4:
mcr p15, 0, r0, c7, c10, 1 /* DCCMVAC: clean D line by MVA to PoC */
add r0, r0, r2 /* Advance to the next line */
cmp r0, r1 /* Loop until the end address is reached */
blo 4b
dsb /* Ensure the cleans are visible to DMA */
bx lr
.size cp15_clean_dcache_for_dma, . - cp15_clean_dcache_for_dma
/****************************************************************************
* Name: cp15_flush_dcache_for_dma
*
* Description:
* Flush the data cache within the specified region
*
* Input Parameters:
* start - virtual start address of region
* end - virtual end address of region
*
* Returned Value:
* None
*
****************************************************************************/
.globl cp15_flush_dcache_for_dma
.type cp15_flush_dcache_for_dma, function
/* Flush (clean then invalidate) the D-cache over [r0=start, r1=end). */
cp15_flush_dcache_for_dma:
mrc p15, 0, r3, c0, c0, 1 /* Read the Cache Type Register (CTR) */
lsr r3, r3, #16 /* Move DminLine (bits 16-19) to bits 0-3 */
and r3, r3, #0xf /* r3 = DminLine = log2(words per line) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = D-cache line size in bytes */
sub r3, r2, #1 /* r3 = line size mask */
bic r0, r0, r3 /* Align the start address to a line boundary */
5:
mcr p15, 0, r0, c7, c14, 1 /* DCCIMVAC: clean+invalidate line by MVA */
add r0, r0, r2 /* Advance to the next line */
cmp r0, r1 /* Loop until the end address is reached */
blo 5b
dsb /* Ensure completion before DMA proceeds */
bx lr
.size cp15_flush_dcache_for_dma, . - cp15_flush_dcache_for_dma
/****************************************************************************
* Name: cp15_flush_kern_dcache_for_dma
*
* Description:
*   Ensure that the data held in the region is written back to memory:
*   clean and invalidate each D-cache line in the region to the point
*   of coherency.
*
* Input Parameters:
*   start - virtual start address of region
*   size  - size of the region in bytes.  NOTE: unlike the other entry
*           points in this file, the second argument is a size, not an
*           end address (see the 'add r1, r0, r1' below).
*
* Returned Value:
*   None
*
****************************************************************************/
.globl cp15_flush_kern_dcache_for_dma
.type cp15_flush_kern_dcache_for_dma, function
cp15_flush_kern_dcache_for_dma:
mrc p15, 0, r3, c0, c0, 1 /* Read the Cache Type Register (CTR) */
lsr r3, r3, #16 /* Move DminLine (bits 16-19) to bits 0-3 */
and r3, r3, #0xf /* r3 = DminLine = log2(words per line) */
mov r2, #4
mov r2, r2, lsl r3 /* r2 = D-cache line size in bytes */
add r1, r0, r1 /* r1 = end address = start + size */
sub r3, r2, #1 /* r3 = line size mask */
bic r0, r0, r3 /* Align the start address to a line boundary */
6:
mcr p15, 0, r0, c7, c14, 1 /* DCCIMVAC: clean+invalidate line by MVA */
add r0, r0, r2 /* Advance to the next line */
cmp r0, r1 /* Loop until the end address is reached */
blo 6b /* FIX: was 'blo 1b', which branched back into
                                 * label 1: inside cp15_coherent_dcache_for_dma
                                 * instead of looping here */
dsb /* Ensure completion before returning */
bx lr
.size cp15_flush_kern_dcache_for_dma, . - cp15_flush_kern_dcache_for_dma
.end
+18 -19
View File
@@ -86,12 +86,12 @@
* If CONFIG_PAGING is selected in the NuttX configuration file, then these
* additional input values are expected:
*
* far - Fault address register. On a data abort, the ARM MMU places the
* miss virtual address (MVA) into the FAR register. This is the address
* dfar - Fault address register. On a data abort, the ARM MMU places the
* miss virtual address (MVA) into the DFAR register. This is the address
* of the data which, when accessed, caused the fault.
* fsr - Fault status register. On a data a abort, the ARM MMU places an
dfsr - Fault status register. On a data abort, the ARM MMU places an
* encoded four-bit value, the fault status, along with the four-bit
* encoded domain number, in the data FSR
* encoded domain number, in the data DFSR
*
* Description:
* This is the data abort exception handler. The ARM data abort exception
@@ -100,10 +100,10 @@
****************************************************************************/
#ifdef CONFIG_PAGING
void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
void arm_dataabort(uint32_t *regs, uint32_t dfar, uint32_t dfsr)
{
FAR struct tcb_s *tcb = (FAR struct tcb_s *)g_readytorun.head;
#ifdef CONFIG_PAGING
DFAR struct tcb_s *tcb = (DFAR struct tcb_s *)g_readytorun.head;
uint32_t *savestate;
/* Save the saved processor context in current_regs where it can be accessed
@@ -112,10 +112,8 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
savestate = (uint32_t*)current_regs;
#endif
current_regs = regs;
#ifdef CONFIG_PAGING
/* In the NuttX on-demand paging implementation, only the read-only, .text
* section is paged. However, the ARM compiler generated PC-relative data
* fetches from within the .text sections. Also, it is customary to locate
@@ -129,19 +127,19 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
* fatal error.
*/
pglldbg("FSR: %08x FAR: %08x\n", fsr, far);
if ((fsr & FSR_MASK) != FSR_PAGE)
pglldbg("DFSR: %08x DFAR: %08x\n", dfsr, dfar);
if ((dfsr & FSR_MASK) != FSR_PAGE)
{
goto segfault;
}
/* Check the (virtual) address of data that caused the data abort. When
* the exception occurred, this address was provided in the FAR register.
* the exception occurred, this address was provided in the DFAR register.
* (It has not yet been saved in the register context save area).
*/
pgllvdbg("VBASE: %08x VEND: %08x\n", PG_PAGED_VBASE, PG_PAGED_VEND);
if (far < PG_PAGED_VBASE || far >= PG_PAGED_VEND)
if (dfar < PG_PAGED_VBASE || dfar >= PG_PAGED_VEND)
{
goto segfault;
}
@@ -152,7 +150,7 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
* prefetch and data aborts.
*/
tcb->xcp.far = regs[REG_R15];
tcb->xcp.dfar = regs[REG_R15];
/* Call pg_miss() to schedule the page fill. A consequences of this
* call are:
@@ -177,14 +175,14 @@ void arm_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
return;
segfault:
#endif
lldbg("Data abort. PC: %08x FAR: %08x FSR: %08x\n", regs[REG_PC], far, fsr);
lldbg("Data abort. PC: %08x DFAR: %08x DFSR: %08x\n",
regs[REG_PC], dfar, dfsr);
PANIC();
}
#else /* CONFIG_PAGING */
void arm_dataabort(uint32_t *regs)
void arm_dataabort(uint32_t *regs, uint32_t dfar, uint32_t dfsr)
{
/* Save the saved processor context in current_regs where it can be accessed
* for register dumps and possibly context switching.
@@ -192,9 +190,10 @@ void arm_dataabort(uint32_t *regs)
current_regs = regs;
/* Crash -- possibly showing diagnost debug information. */
/* Crash -- possibly showing diagnostic debug information. */
lldbg("Data abort. PC: %08x\n", regs[REG_PC]);
lldbg("Data abort. PC: %08x DFAR: %08x DFSR: %08x\n",
regs[REG_PC], dfar, dfsr);
PANIC();
}
+41 -7
View File
@@ -230,7 +230,7 @@
__start:
/* Make sure that we are in SVC mode with all IRQs disabled */
mov r0, #(PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT)
mov r0, #(PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT)
msr cpsr_c, r0
/* Initialize DRAM using a macro provided by board-specific logic.
@@ -341,15 +341,49 @@ __start:
* r4 = Address of the base of the L1 table
*/
/* Invalidate caches and TLBs.
*
* NOTE: "The ARMv7 Virtual Memory System Architecture (VMSA) does not
* support a CP15 operation to invalidate the entire data cache. ...
* In normal usage the only time the entire data cache has to be
* invalidated is on reset."
*
* REVISIT: This could be an issue if NuttX is ever started in a
* context where the DCache could be dirty.
*/
mov r0, #0
mcr p15, 0, r0, c7, c7 /* Invalidate I,D caches */
mcr p15, 0, r0, c7, c10, 4 /* Drain write buffer */
mcr p15, 0, r0, c8, c7 /* Invalidate I,D TLBs */
mcr p15, 0, r4, c2, c0 /* Load page table pointer */
mcr CP15_ICIALLUIS(r0) /* Invalidate entire instruction cache Inner Shareable */
/* Load the page table address.
*
* NOTES:
* - Here we assume that the page table address is aligned to at least
* least a 16KB boundary (bits 0-13 are zero). No masking is provided
* to protect against an unaligned page table address.
* - The Cortex-A5 has two page table address registers, TTBR0 and 1.
* Only TTBR0 is used in this implementation but both are initialized.
*
* Here we expect to have:
* r0 = Zero
* r4 = Address of the base of the L1 table
*/
mcr CP15_TTBR0(r4)
mcr CP15_TTBR1(r4)
/* Clear the TTB control register (TTBCR) to indicate that we are using
* TTBR0. r0 still holds the value of zero.
*/
mcr CP15_TTBCR(r0)
/* Enable DCache write-through if so configured.
*
* The Cortex-A5 MPCore data cache only supports a write-back policy.
*/
#ifdef CPU_DCACHE_WRITETHROUGH
mov r0, #4 /* Disable write-back on caches explicitly */
mcr p15, 7, r0, c15, c0, 0
#endif
/* Enable the MMU and caches
+3 -3
View File
@@ -119,20 +119,20 @@ void up_initial_state(struct tcb_s *tcb)
{
/* It is a kernel thread.. set supervisor mode */
cpsr = PSR_MODE_SUPER | PSR_F_BIT;
cpsr = PSR_MODE_SVC | PSR_F_BIT;
}
else
{
/* It is a normal task or a pthread. Set user mode */
cpsr = PSR_MODE_USER | PSR_F_BIT;
cpsr = PSR_MODE_USR | PSR_F_BIT;
}
#else
/* If the kernel build is not selected, then all threads run in
* supervisor-mode.
*/
cpsr = PSR_MODE_SUPER | PSR_F_BIT;
cpsr = PSR_MODE_SVC | PSR_F_BIT;
#endif
/* Enable or disable interrupts, based on user configuration */
+24 -6
View File
@@ -87,9 +87,10 @@
*
****************************************************************************/
void arm_prefetchabort(uint32_t *regs)
{
#ifdef CONFIG_PAGING
void arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr)
{
uint32_t *savestate;
/* Save the saved processor context in current_regs where it can be accessed
@@ -97,10 +98,8 @@ void arm_prefetchabort(uint32_t *regs)
*/
savestate = (uint32_t*)current_regs;
#endif
current_regs = regs;
#ifdef CONFIG_PAGING
/* Get the (virtual) address of instruction that caused the prefetch abort.
* When the exception occurred, this address was provided in the lr register
* and this value was saved in the context save area as the PC at the
@@ -146,9 +145,28 @@ void arm_prefetchabort(uint32_t *regs)
current_regs = savestate;
}
else
#endif
{
lldbg("Prefetch abort. PC: %08x\n", regs[REG_PC]);
lldbg("Prefetch abort. PC: %08x IFAR: %08x IFSR: %08x\n",
regs[REG_PC], ifar, ifsr);
PANIC();
}
}
#else /* CONFIG_PAGING */
void arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr)
{
/* Save the saved processor context in current_regs where it can be accessed
* for register dumps and possibly context switching.
*/
current_regs = regs;
/* Crash -- possibly showing diagnostic debug information. */
lldbg("Prefetch abort. PC: %08x IFAR: %08x IFSR: %08x\n",
regs[REG_PC], ifar, ifsr);
PANIC();
}
#endif /* CONFIG_PAGING */
+2 -2
View File
@@ -162,7 +162,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
*/
current_regs[REG_PC] = (uint32_t)up_sigdeliver;
current_regs[REG_CPSR] = PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT;
current_regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT;
/* And make sure that the saved context in the TCB
* is the same as the interrupt return context.
@@ -194,7 +194,7 @@ void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
*/
tcb->xcp.regs[REG_PC] = (uint32_t)up_sigdeliver;
tcb->xcp.regs[REG_CPSR] = PSR_MODE_SUPER | PSR_I_BIT | PSR_F_BIT;
tcb->xcp.regs[REG_CPSR] = PSR_MODE_SVC | PSR_I_BIT | PSR_F_BIT;
}
irqrestore(flags);
+15 -8
View File
@@ -41,6 +41,7 @@
#include <nuttx/irq.h>
#include "arm.h"
#include "cp15.h"
/************************************************************************************
* Definitions
@@ -87,6 +88,7 @@ g_aborttmp:
.globl arm_vectorirq
.type arm_vectorirq, %function
arm_vectorirq:
/* On entry, we are in IRQ mode. We are free to use
* the IRQ mode r13 and r14.
@@ -101,7 +103,7 @@ arm_vectorirq:
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
@@ -163,6 +165,7 @@ arm_vectorirq:
.globl arm_vectorswi
.type arm_vectorswi, %function
arm_vectorswi:
/* Create a context structure. First set aside a stack frame
@@ -213,6 +216,7 @@ arm_vectorswi:
.globl arm_vectordata
.type arm_vectordata, %function
arm_vectordata:
/* On entry we are free to use the ABORT mode registers
* r13 and r14
@@ -227,7 +231,7 @@ arm_vectordata:
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
@@ -256,10 +260,8 @@ arm_vectordata:
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
#ifdef CONFIG_PAGING
mrc p15, 0, r2, c5, c0, 0 /* Get r2=FSR */
mrc p15, 0, r1, c6, c0, 0 /* Get R1=FAR */
#endif
mrc CP15_DFAR(r1) /* Get R1=DFAR */
mrc CP15_DFSR(r2) /* Get r2=DFSR */
bl arm_dataabort /* Call the handler */
/* Restore the CPSR, SVC modr registers and return */
@@ -287,6 +289,7 @@ arm_vectordata:
.globl arm_vectorprefetch
.type arm_vectorprefetch, %function
arm_vectorprefetch:
/* On entry we are free to use the ABORT mode registers
* r13 and r14
@@ -301,7 +304,7 @@ arm_vectorprefetch:
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
@@ -330,6 +333,8 @@ arm_vectorprefetch:
mov fp, #0 /* Init frame pointer */
mov r0, sp /* Get r0=xcp */
mrc CP15_IFAR(r1) /* Get R1=IFAR */
mrc CP15_IFSR(r2) /* Get r2=IFSR */
bl arm_prefetchabort /* Call the handler */
/* Restore the CPSR, SVC modr registers and return */
@@ -355,6 +360,7 @@ arm_vectorprefetch:
.globl arm_vectorundefinsn
.type arm_vectorundefinsn, %function
arm_vectorundefinsn:
/* On entry we are free to use the UND mode registers
* r13 and r14
@@ -368,7 +374,7 @@ arm_vectorundefinsn:
/* Then switch back to SVC mode */
bic lr, lr, #PSR_MODE_MASK /* Keep F and T bits */
orr lr, lr, #(PSR_MODE_SUPER | PSR_I_BIT)
orr lr, lr, #(PSR_MODE_SVC | PSR_I_BIT)
msr cpsr_c, lr /* Switch to SVC mode */
/* Create a context structure. First set aside a stack frame
@@ -421,6 +427,7 @@ arm_vectorundefinsn:
.globl arm_vectorfiq
.type arm_vectorfiq, %function
arm_vectorfiq:
subs pc, lr, #4
.size arm_vectorfiq, . - arm_vectorfiq
File diff suppressed because it is too large Load Diff
+8 -8
View File
@@ -238,11 +238,11 @@
nop
.endm
/* The ARMv7-aA architecture supports two translation tables. This
/* The ARMv7-A architecture supports two translation tables. This
* implementation, however, uses only translation table 0. This
* functions clears the TTB control register (TTBCR), indicating that
* we are using TTB 0. This is it writes the value of the page table
* to Translation Table Base Register 0 (TTBR0).
* macro writes the address of the page table to the Translation
* Table Base Register 0 (TTBR0). Then it clears the TTB control
* register (TTBCR), indicating that we are using TTBR0.
*/
.macro cp14_wrttb, ttb, scratch
@@ -288,11 +288,11 @@ static inline void cp15_wrdacr(unsigned int dacr)
);
}
/* The ARMv7-aA architecture supports two translation tables. This
/* The ARMv7-A architecture supports two translation tables. This
* implementation, however, uses only translation table 0. This
* functions clears the TTB control register (TTBCR), indicating that
* we are using TTB 0. This is it writes the value of the page table
* to Translation Table Base Register 0 (TTBR0).
* function writes the address of the page table to the Translation
* Table Base Register 0 (TTBR0). Then it clears the TTB control
* register (TTBCR), indicating that we are using TTBR0.
*/
static inline void cp14_wrttb(unsigned int ttb)
+1 -1
View File
@@ -154,7 +154,7 @@
/* Sizes of sections/regions. The boot logic in lpc31_boot.c, will select
* 1Mb level 1 MMU mappings to span the entire physical address space.
* The definitiions below specify the number of 1Mb entries that are
* The definitions below specify the number of 1Mb entries that are
* required to span a particular address region.
*/
+3 -2
View File
@@ -35,8 +35,9 @@
HEAD_ASRC = arm_head.S
CMN_ASRCS = arm_vectors.S arm_vectortab.S arm_fullcontextrestore.S
CMN_ASRCS += arm_saveusercontext.S arm_vectoraddrexcptn.S arm_vfork.S
CMN_ASRCS = arm_vectors.S arm_vectortab.S arm_cache.S
CMN_ASRCS += arm_fullcontextrestore.S arm_saveusercontext.S
CMN_ASRCS += arm_vectoraddrexcptn.S arm_vfork.S
CMN_CSRCS = up_initialize.c up_idle.c up_interruptcontext.c up_exit.c
CMN_CSRCS += up_createstack.c up_releasestack.c up_usestack.c up_vfork.c
+1 -1
View File
@@ -173,7 +173,7 @@
/* Sizes of sections/regions. The boot logic in sam_boot.c, will select
* 1Mb level 1 MMU mappings to span the entire physical address space.
* The definitiions below specify the number of 1Mb entries that are
* The definitions below specify the number of 1Mb entries that are
* required to span a particular address region.
*/