bsps: Remove imported Xilinx headers

This removes the headers imported from the embeddedsw repository in
favor of a much thinner shim. This also removes the complicated build
system configuration necessary to support use of these headers. The
primary reason for removal is that certain external Xilinx libraries
also require these headers, which causes version mismatches and header
conflicts that can be avoided.
Kinsey Moore
2024-10-23 12:06:17 -05:00
committed by Kinsey Moore
parent 79bed0f191
commit 1bba349478
77 changed files with 192 additions and 9139 deletions

View File

@@ -1,8 +1,15 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSBSPsArmXilinxVersal
*
* @brief This header file provides BSP-specific interfaces.
*/
/*
* COPYRIGHT (c) 2023.
* On-Line Applications Research Corporation (OAR).
* Copyright (C) 2024 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,12 +33,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_SYSTEM_H
#define LIBBSP_SHARED_XIL_SYSTEM_H
#ifndef LIBBSP_ARM_XILINX_ZYNQMP_RPU_BSP_XIL_COMPAT_H
#define LIBBSP_ARM_XILINX_ZYNQMP_RPU_BSP_XIL_COMPAT_H
/*
* This file defines anything necessary for the Xilinx support infrastructure to
* function properly on a particular platform.
*/
#include <bsp/xil-compat-common.h>
#endif

View File

@@ -1,8 +1,15 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSBSPsAArch64XilinxZynqMP
*
* @brief This header file provides BSP-specific interfaces.
*/
/*
* COPYRIGHT (c) 2023.
* On-Line Applications Research Corporation (OAR).
* Copyright (C) 2024 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,12 +33,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_SYSTEM_H
#define LIBBSP_SHARED_XIL_SYSTEM_H
#ifndef LIBBSP_AARCH64_XILINX_ZYNQMP_BSP_XIL_COMPAT_H
#define LIBBSP_AARCH64_XILINX_ZYNQMP_BSP_XIL_COMPAT_H
/*
* This file defines anything necessary for the Xilinx support infrastructure to
* function properly on a particular platform.
*/
#include <bsp/xil-compat-common.h>
#endif

View File

@@ -1,8 +1,15 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSBSPsArmXilinxZynqMPRPU
*
* @brief This header file provides BSP-specific interfaces.
*/
/*
* COPYRIGHT (c) 2023.
* On-Line Applications Research Corporation (OAR).
* Copyright (C) 2024 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,13 +33,10 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_SYSTEM_H
#define LIBBSP_SHARED_XIL_SYSTEM_H
#ifndef LIBBSP_ARM_XILINX_ZYNQMP_RPU_BSP_XIL_COMPAT_H
#define LIBBSP_ARM_XILINX_ZYNQMP_RPU_BSP_XIL_COMPAT_H
/*
* This file defines anything necessary for the Xilinx support infrastructure to
* function properly
*/
#include <bsp/xil-compat-common.h>
#define ARMR5
#endif
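The RPU variant additionally defines ARMR5 because Xilinx driver sources select
Cortex-R5 specific code paths from that macro (the deleted xil_exception.h below
tests it, for example). A minimal sketch of the kind of conditional compilation
this enables; the function and the cache-line values are illustrative, not taken
from the commit:

/* Illustrative only: external Xilinx sources typically key processor-specific
 * behavior off the ARMR5 macro that the shim provides. */
#include <bsp/xil-compat.h>

static inline u32 platform_cache_line_size(void)
{
#ifdef ARMR5
  return 32U;   /* Cortex-R5 data cache line size (assumption for this sketch) */
#else
  return 64U;   /* Cortex-A53/A72 data cache line size (assumption) */
#endif
}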

View File

@@ -0,0 +1,91 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSBSPsShared
*
* @brief This header file provides a minimal shim for Xilinx drivers.
*/
/*
* Copyright (C) 2024 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_COMPAT_COMMON_H
#define LIBBSP_SHARED_XIL_COMPAT_COMMON_H
#include <stdint.h>
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
typedef intptr_t INTPTR;
typedef uintptr_t UINTPTR;
#define INLINE inline
#define XST_SUCCESS 0
#define XST_FAILURE 1
#define XST_DEVICE_IS_STARTED 5
#define XST_DEVICE_BUSY 21
#define XST_FLASH_TIMEOUT_ERROR 1134
#define XST_SPI_TRANSFER_DONE 1152
#define XST_SPI_COMMAND_ERROR 1162
#define XST_SPI_POLL_DONE 1163
#include <rtems/score/assert.h>
#define Xil_AssertNonvoid(expr) _Assert(expr);
#define Xil_AssertVoid(expr) _Assert(expr);
#define Xil_AssertVoidAlways() _Assert(false);
#include <stdio.h>
#define xil_printf(args...) printf(args)
#define XIL_COMPONENT_IS_READY 0x1U
#include <string.h>
#define Xil_MemCpy(dest, src, count) memcpy(dest, src, count)
static inline uint32_t Xil_In32(uintptr_t addr)
{
return *(volatile uint32_t *) addr;
}
static inline void Xil_Out32(uintptr_t addr, uint32_t value)
{
*(volatile uint32_t *)addr = value;
}
#include <rtems/rtems/cache.h>
#define Xil_DCacheInvalidateRange(addr, len) \
rtems_cache_flush_multiple_data_lines((void*)addr, len); \
rtems_cache_invalidate_multiple_data_lines((void*)addr, len)
#define Xil_DCacheFlushRange(addr, len) \
rtems_cache_flush_multiple_data_lines((void*)addr, len)
#include <unistd.h>
#include <bspopts.h>
#endif
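For orientation, a minimal sketch of how a driver routine built against this shim
would use its facilities under RTEMS. The device base address, register offsets,
and function name below are invented for illustration and do not appear in the
commit:

/* Hypothetical driver fragment exercising the shim: the types, register access
 * helpers, assertions, cache maintenance macros, and status codes all resolve
 * to the definitions above.  DEV_BASE and the offsets are made-up values. */
#include <bsp/xil-compat.h>

#define DEV_BASE        0xFF0F0000UL  /* assumed example base address */
#define DEV_CTRL        0x00U
#define DEV_CTRL_START  0x1U

static s32 dev_start_dma(void *buf, u32 len)
{
  u32 ctrl;

  Xil_AssertNonvoid(buf != NULL);

  /* Make the buffer visible to the device before starting the transfer. */
  Xil_DCacheFlushRange((UINTPTR) buf, len);

  ctrl = Xil_In32(DEV_BASE + DEV_CTRL);
  Xil_Out32(DEV_BASE + DEV_CTRL, ctrl | DEV_CTRL_START);
  xil_printf("dma started: buf=%p len=%u\n", buf, (unsigned) len);
  return XST_SUCCESS;
}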

View File

@@ -38,11 +38,12 @@ extern "C" {
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xil_types.h"
#include "xil_assert.h"
#include "xil_io.h"
#ifdef __rtems__
#include <xil_system.h>
#else
#include <bsp/xil-compat.h>
#endif
/************************** Constant Definitions *****************************/
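The same include rewiring repeats across the remaining imported driver headers:
under __rtems__ the original embeddedsw includes collapse to the BSP shim. A
schematic of the pattern, for reference:

/* Schematic of the rewiring applied throughout the driver headers: when built
 * for RTEMS, everything funnels through the thin BSP shim instead of the
 * imported Xilinx support headers. */
#ifndef __rtems__
#include "xil_types.h"       /* original embeddedsw headers ... */
#include "xil_assert.h"
#include "xil_io.h"
#else
#include <bsp/xil-compat.h>  /* ... replaced by the shim under RTEMS */
#endif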

View File

@@ -167,6 +167,7 @@ extern "C" {
#endif
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xil_types.h"
#include <string.h>
#include "xstatus.h"
@@ -177,6 +178,12 @@ extern "C" {
#if defined (XCLOCKING)
#include "xil_clocking.h"
#endif
#else
#include <bsp/xil-compat.h>
#include <string.h>
#include "xnandpsu_hw.h"
#include "xnandpsu_onfi.h"
#endif
/************************** Constant Definitions *****************************/
#define XNANDPSU_DEBUG

View File

@@ -39,7 +39,11 @@ extern "C" {
#endif
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xil_io.h"
#else
#include <bsp/xil-compat.h>
#endif
/************************** Constant Definitions *****************************/

View File

@@ -32,7 +32,11 @@ extern "C" {
#endif
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xil_types.h"
#else
#include <bsp/xil-compat.h>
#endif
/************************** Constant Definitions *****************************/
/* Standard ONFI 3.1 Commands */

View File

@@ -181,13 +181,19 @@ extern "C" {
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xstatus.h"
#endif
#include "xqspipsu_hw.h"
#ifndef __rtems__
#include "xil_cache.h"
#include "xil_mem.h"
#if defined (XCLOCKING)
#include "xil_clocking.h"
#endif
#else
#include <bsp/xil-compat.h>
#endif
/**************************** Type Definitions *******************************/
/**

View File

@@ -41,7 +41,11 @@ extern "C" {
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xparameters.h" /* SDK generated parameters */
#else
#include <bsp/xil-compat.h>
#endif
#include "xqspipsu.h" /* QSPIPSU device driver */
/************************** Constant Definitions *****************************/

View File

@@ -43,10 +43,14 @@ extern "C" {
/***************************** Include Files *********************************/
#ifndef __rtems__
#include "xil_types.h"
#include "xil_assert.h"
#include "xil_io.h"
#include "xparameters.h"
#else
#include <bsp/xil-compat.h>
#endif
/************************** Constant Definitions *****************************/
/**

View File

@@ -1,75 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.h
*
* @addtogroup a53_64_cache_apis Cortex A53 64bit Processor Cache Functions
*
* Cache functions provide access to cache related operations such as flush
* and invalidate for instruction and data caches. It gives option to perform
* the cache operations on a single cacheline, a range of memory and an entire
* cache.
*
* @{
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.00 pkp 05/29/14 First release
* </pre>
*
******************************************************************************/
#ifndef XIL_CACHE_H
#define XIL_CACHE_H
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
*@cond nocomments
*/
/************************** Constant Definitions *****************************/
#define L1_DATA_PREFETCH_CONTROL_MASK 0xE000
#define L1_DATA_PREFETCH_CONTROL_SHIFT 13
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions *********************/
#define Xil_DCacheFlushRange Xil_DCacheInvalidateRange
/************************** Function Prototypes ******************************/
void Xil_DCacheEnable(void);
void Xil_DCacheDisable(void);
void Xil_DCacheInvalidate(void);
void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len);
void Xil_DCacheInvalidateLine(INTPTR adr);
void Xil_DCacheFlush(void);
void Xil_DCacheFlushLine(INTPTR adr);
void Xil_ICacheEnable(void);
void Xil_ICacheDisable(void);
void Xil_ICacheInvalidate(void);
void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len);
void Xil_ICacheInvalidateLine(INTPTR adr);
void Xil_ConfigureL1Prefetch(u8 num);
#ifdef __cplusplus
}
#endif
#endif
/**
* @} End of "addtogroup a53_64_cache_apis".
*/
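With this header removed, code that called the Xilinx cache API directly is
expected to use the RTEMS cache manager (or the shim macros shown earlier). A
rough correspondence, as a sketch rather than an exhaustive mapping; the wrapper
function is hypothetical:

/* Sketch of the replacement calls; the rtems_cache_* functions are the
 * standard RTEMS cache manager API from <rtems/rtems/cache.h>. */
#include <stddef.h>
#include <rtems/rtems/cache.h>

static void flush_and_invalidate(void *buf, size_t len)
{
  /* Xil_DCacheFlushRange(addr, len)      -> flush data cache lines */
  rtems_cache_flush_multiple_data_lines(buf, len);

  /* Xil_DCacheInvalidateRange(addr, len) -> invalidate data cache lines */
  rtems_cache_invalidate_multiple_data_lines(buf, len);

  /* Xil_ICacheInvalidate()               -> invalidate instruction cache */
  rtems_cache_invalidate_entire_instruction();
}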

View File

@@ -1,408 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_exception.h
*
* This header file contains ARM Cortex A53,A9,R5 specific exception related APIs.
* For exception related functions that can be used across all Xilinx supported
* processors, please use xil_exception.h.
*
* @addtogroup arm_exception_apis ARM Processor Exception Handling
* @{
* ARM processors specific exception related APIs for cortex A53,A9 and R5 can
* utilized for enabling/disabling IRQ, registering/removing handler for
* exceptions or initializing exception vector table with null handler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Consolidated file for a53,a9 and r5 processors
* 6.7 mna 26/04/18 Add API Xil_GetExceptionRegisterHandler.
* 6.7 asa 18/05/18 Update signature of API Xil_GetExceptionRegisterHandler.
* 7.0 mus 01/03/19 Tweak Xil_ExceptionEnableMask and
* Xil_ExceptionDisableMask macros to support legacy
* examples for Cortexa72 EL3 exception level.
* 7.3 mus 04/15/20 Added Xil_EnableNestedInterrupts and
* Xil_DisableNestedInterrupts macros for ARMv8.
* For Cortexa72, these macro's would not be supported
* at EL3, as Cortexa72 is using GIC-500(GICv3), which
* triggeres only FIQ at EL3. Fix for CR#1062506
* 7.6 mus 09/17/21 Updated flag checking to fix warning reported with
* -Wundef compiler option CR#1110261
* 7.7 mus 01/31/22 Few of the #defines in xil_exception.h in are treated
* in different way based on "versal" flag. In existing
* flow, this flag is defined only in xparameters.h and
* BSP compiler flags, it is not defined in application
* compiler flags. So, including xil_exception.h in
* application source file, without including
* xparameters.h results in incorrect behavior.
* Including xparameters.h in xil_exception.h to avoid
* such issues. It fixes CR#1120498.
* 7.7 sk 03/02/22 Define XExc_VectorTableEntry structure to fix
* misra_c_2012_rule_5_6 violation.
* 7.7 sk 03/02/22 Add XExc_VectorTable as extern to fix misra_c_2012_
* rule_8_4 violation.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
#define XIL_EXCEPTION_H /* by using protection macros */
/***************************** Include Files ********************************/
#include "xil_types.h"
#include "xpseudo_asm.h"
#include "bspconfig.h"
#include "xparameters.h"
#ifdef __cplusplus
extern "C" {
#endif
/************************** Constant Definitions ****************************/
#define XIL_EXCEPTION_FIQ XREG_CPSR_FIQ_ENABLE
#define XIL_EXCEPTION_IRQ XREG_CPSR_IRQ_ENABLE
#define XIL_EXCEPTION_ALL (XREG_CPSR_FIQ_ENABLE | XREG_CPSR_IRQ_ENABLE)
#define XIL_EXCEPTION_ID_FIRST 0U
#if defined (__aarch64__)
#define XIL_EXCEPTION_ID_SYNC_INT 1U
#define XIL_EXCEPTION_ID_IRQ_INT 2U
#define XIL_EXCEPTION_ID_FIQ_INT 3U
#define XIL_EXCEPTION_ID_SERROR_ABORT_INT 4U
#define XIL_EXCEPTION_ID_LAST 5U
#else
#define XIL_EXCEPTION_ID_RESET 0U
#define XIL_EXCEPTION_ID_UNDEFINED_INT 1U
#define XIL_EXCEPTION_ID_SWI_INT 2U
#define XIL_EXCEPTION_ID_PREFETCH_ABORT_INT 3U
#define XIL_EXCEPTION_ID_DATA_ABORT_INT 4U
#define XIL_EXCEPTION_ID_IRQ_INT 5U
#define XIL_EXCEPTION_ID_FIQ_INT 6U
#define XIL_EXCEPTION_ID_LAST 6U
#endif
/*
* XIL_EXCEPTION_ID_INT is defined for all Xilinx processors.
*/
#if defined (versal) && !defined(ARMR5) && EL3
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_FIQ_INT
#else
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_IRQ_INT
#endif
/**************************** Type Definitions ******************************/
/**
* This typedef is the exception handler function.
*/
typedef void (*Xil_ExceptionHandler)(void *data);
typedef void (*Xil_InterruptHandler)(void *data);
typedef struct {
Xil_ExceptionHandler Handler;
void *Data;
} XExc_VectorTableEntry;
extern XExc_VectorTableEntry XExc_VectorTable[];
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions ********************/
/****************************************************************************/
/**
* @brief Enable Exceptions.
*
* @param Mask: Value for enabling the exceptions.
*
* @return None.
*
* @note If bit is 0, exception is enabled.
* C-Style signature: void Xil_ExceptionEnableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* Cortexa72 processor in versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, tweaking this macro to always enable FIQ
* ignoring argument passed by user.
*/
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionEnableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) & (~((Mask) & XIL_EXCEPTION_ALL))); \
}
#endif
/****************************************************************************/
/**
* @brief Enable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_FIQ)
#else
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_IRQ)
#endif
/****************************************************************************/
/**
* @brief Disable Exceptions.
*
* @param Mask: Value for disabling the exceptions.
*
* @return None.
*
* @note If bit is 1, exception is disabled.
* C-Style signature: Xil_ExceptionDisableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* Cortexa72 processor in versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, tweaking this macro to always disable FIQ
* ignoring argument passed by user.
*/
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionDisableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) | ((Mask) & XIL_EXCEPTION_ALL)); \
}
#endif
/****************************************************************************/
/**
* Disable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#define Xil_ExceptionDisable() \
Xil_ExceptionDisableMask(XIL_EXCEPTION_IRQ)
#if ( defined (PLATFORM_ZYNQMP) && defined (EL3) && (EL3==1) )
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF.This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.. However,it is not defined for Versal Cortex-A72 BSP
* configured to run at EL3. Reason is, Cortex-A72 is coupled
* with GIC-500(GICv3 specifications) and it triggers only FIQ at EL3.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL3"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL3"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
* nesting of interrupts have been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL3, X1"); \
__asm__ __volatile__ ("msr SPSR_EL3, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (defined (EL1_NONSECURE) && (EL1_NONSECURE==1))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF.This
* macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
* BSP configured to run at EL1 NON SECURE
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL1"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL1"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
* BSP configured to run at EL1 NON SECURE
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
* nesting of interrupts have been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL1, X1"); \
__asm__ __volatile__ ("msr SPSR_EL1, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (!defined (__aarch64__) && !defined (ARMA53_32))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I and F bits in CPSR. This
* API is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I and F
* are 1). To allow nesting of interrupts, this macro should be
* used. It clears the I and F bits by changing the ARM mode to
* system mode. Once these bits are cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I and
* F bits, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("mrs lr, spsr"); \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x1F"); \
__asm__ __volatile__ ("stmfd sp!, {lr}");
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I and F bits. This API
* is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
* nesting of interrupts have been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ/FIQ mode and
* hence sets back the I and F bits.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x92"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr spsr_cxsf, lr"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
#endif
/************************** Variable Definitions ****************************/
/************************** Function Prototypes *****************************/
extern void Xil_ExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler Handler,
void *Data);
extern void Xil_ExceptionRemoveHandler(u32 Exception_id);
extern void Xil_GetExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler *Handler, void **Data);
extern void Xil_ExceptionInit(void);
#if defined (__aarch64__)
void Xil_SyncAbortHandler(void *CallBackRef);
void Xil_SErrorAbortHandler(void *CallBackRef);
#else
extern void Xil_DataAbortHandler(void *CallBackRef);
extern void Xil_PrefetchAbortHandler(void *CallBackRef);
extern void Xil_UndefinedExceptionHandler(void *CallBackRef);
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XIL_EXCEPTION_H */
/**
* @} End of "addtogroup arm_exception_apis".
*/
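Code that registered handlers through this header's Xil_ExceptionRegisterHandler()
is expected to use the RTEMS interrupt extension API instead. A minimal, hedged
sketch; the vector number, handler, and names are placeholders, not values from
the commit:

/* Sketch: installing an interrupt handler with the RTEMS interrupt extension
 * API rather than Xil_ExceptionRegisterHandler().  EXAMPLE_IRQ_VECTOR is a
 * placeholder vector number. */
#include <rtems.h>
#include <rtems/irq-extension.h>

#define EXAMPLE_IRQ_VECTOR 42  /* placeholder vector number */

static void example_handler(void *arg)
{
  (void) arg;
  /* acknowledge and service the device here */
}

static rtems_status_code install_example_handler(void)
{
  return rtems_interrupt_handler_install(
    EXAMPLE_IRQ_VECTOR,
    "example",
    RTEMS_INTERRUPT_UNIQUE,
    example_handler,
    NULL
  );
}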

View File

@@ -1,53 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm.h
*
* @addtogroup a53_32_specific Cortex A53 32bit Processor Specific Include Files
*
* The xpseudo_asm.h includes xreg_cortexa53.h and xpseudo_asm_gcc.h.
* The xreg_cortexa53.h file contains definitions for inline assembler code.
* It provides inline definitions for Cortex A53 GPRs, SPRs, co-processor
* registers and floating point registers.
*
* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
* assembler instructions, available as macros. These can be very useful for
* tasks such as setting or getting special purpose registers, synchronization,
* or cache manipulation etc. These inline assembler instructions can be used
* from drivers and user applications written in C.
*
* @{
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* </pre>
*
******************************************************************************/
#ifndef XPSEUDO_ASM_H
#define XPSEUDO_ASM_H
#ifdef __cplusplus
extern "C" {
#endif
#include "xreg_cortexa53.h"
#include "xpseudo_asm_gcc.h"
#ifdef __cplusplus
}
#endif
#endif /* XPSEUDO_ASM_H */
/**
* @} End of "addtogroup a53_32_specific".
*/

View File

@@ -1,394 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xreg_cortexa53.h
*
* This header file contains definitions for using inline assembler code. It is
* written specifically for the GNU.
*
* All of the ARM Cortex A53 GPRs, SPRs, and Debug Registers are defined along
* with the positions of the bits within the registers.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* </pre>
*
******************************************************************************/
#ifndef XREG_CORTEXA53_H
#define XREG_CORTEXA53_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
*@cond nocomments
*/
/* GPRs */
#define XREG_GPR0 r0
#define XREG_GPR1 r1
#define XREG_GPR2 r2
#define XREG_GPR3 r3
#define XREG_GPR4 r4
#define XREG_GPR5 r5
#define XREG_GPR6 r6
#define XREG_GPR7 r7
#define XREG_GPR8 r8
#define XREG_GPR9 r9
#define XREG_GPR10 r10
#define XREG_GPR11 r11
#define XREG_GPR12 r12
#define XREG_GPR13 r13
#define XREG_GPR14 r14
#define XREG_GPR15 r15
#define XREG_CPSR cpsr
/* Coprocessor number defines */
#define XREG_CP0 0
#define XREG_CP1 1
#define XREG_CP2 2
#define XREG_CP3 3
#define XREG_CP4 4
#define XREG_CP5 5
#define XREG_CP6 6
#define XREG_CP7 7
#define XREG_CP8 8
#define XREG_CP9 9
#define XREG_CP10 10
#define XREG_CP11 11
#define XREG_CP12 12
#define XREG_CP13 13
#define XREG_CP14 14
#define XREG_CP15 15
/* Coprocessor control register defines */
#define XREG_CR0 cr0
#define XREG_CR1 cr1
#define XREG_CR2 cr2
#define XREG_CR3 cr3
#define XREG_CR4 cr4
#define XREG_CR5 cr5
#define XREG_CR6 cr6
#define XREG_CR7 cr7
#define XREG_CR8 cr8
#define XREG_CR9 cr9
#define XREG_CR10 cr10
#define XREG_CR11 cr11
#define XREG_CR12 cr12
#define XREG_CR13 cr13
#define XREG_CR14 cr14
#define XREG_CR15 cr15
/* Current Processor Status Register (CPSR) Bits */
#define XREG_CPSR_THUMB_MODE 0x20
#define XREG_CPSR_MODE_BITS 0x1F
#define XREG_CPSR_SYSTEM_MODE 0x1F
#define XREG_CPSR_UNDEFINED_MODE 0x1B
#define XREG_CPSR_DATA_ABORT_MODE 0x17
#define XREG_CPSR_SVC_MODE 0x13
#define XREG_CPSR_IRQ_MODE 0x12
#define XREG_CPSR_FIQ_MODE 0x11
#define XREG_CPSR_USER_MODE 0x10
#define XREG_CPSR_IRQ_ENABLE 0x80
#define XREG_CPSR_FIQ_ENABLE 0x40
#define XREG_CPSR_N_BIT 0x80000000
#define XREG_CPSR_Z_BIT 0x40000000
#define XREG_CPSR_C_BIT 0x20000000
#define XREG_CPSR_V_BIT 0x10000000
/* CP15 defines */
/* C0 Register defines */
#define XREG_CP15_MAIN_ID "p15, 0, %0, c0, c0, 0"
#define XREG_CP15_CACHE_TYPE "p15, 0, %0, c0, c0, 1"
#define XREG_CP15_TCM_TYPE "p15, 0, %0, c0, c0, 2"
#define XREG_CP15_TLB_TYPE "p15, 0, %0, c0, c0, 3"
#define XREG_CP15_MULTI_PROC_AFFINITY "p15, 0, %0, c0, c0, 5"
#define XREG_CP15_PROC_FEATURE_0 "p15, 0, %0, c0, c1, 0"
#define XREG_CP15_PROC_FEATURE_1 "p15, 0, %0, c0, c1, 1"
#define XREG_CP15_DEBUG_FEATURE_0 "p15, 0, %0, c0, c1, 2"
#define XREG_CP15_MEMORY_FEATURE_0 "p15, 0, %0, c0, c1, 4"
#define XREG_CP15_MEMORY_FEATURE_1 "p15, 0, %0, c0, c1, 5"
#define XREG_CP15_MEMORY_FEATURE_2 "p15, 0, %0, c0, c1, 6"
#define XREG_CP15_MEMORY_FEATURE_3 "p15, 0, %0, c0, c1, 7"
#define XREG_CP15_INST_FEATURE_0 "p15, 0, %0, c0, c2, 0"
#define XREG_CP15_INST_FEATURE_1 "p15, 0, %0, c0, c2, 1"
#define XREG_CP15_INST_FEATURE_2 "p15, 0, %0, c0, c2, 2"
#define XREG_CP15_INST_FEATURE_3 "p15, 0, %0, c0, c2, 3"
#define XREG_CP15_INST_FEATURE_4 "p15, 0, %0, c0, c2, 4"
#define XREG_CP15_CACHE_SIZE_ID "p15, 1, %0, c0, c0, 0"
#define XREG_CP15_CACHE_LEVEL_ID "p15, 1, %0, c0, c0, 1"
#define XREG_CP15_AUXILARY_ID "p15, 1, %0, c0, c0, 7"
#define XREG_CP15_CACHE_SIZE_SEL "p15, 2, %0, c0, c0, 0"
/* C1 Register Defines */
#define XREG_CP15_SYS_CONTROL "p15, 0, %0, c1, c0, 0"
#define XREG_CP15_AUX_CONTROL "p15, 0, %0, c1, c0, 1"
#define XREG_CP15_CP_ACCESS_CONTROL "p15, 0, %0, c1, c0, 2"
#define XREG_CP15_SECURE_CONFIG "p15, 0, %0, c1, c1, 0"
#define XREG_CP15_SECURE_DEBUG_ENABLE "p15, 0, %0, c1, c1, 1"
#define XREG_CP15_NS_ACCESS_CONTROL "p15, 0, %0, c1, c1, 2"
#define XREG_CP15_VIRTUAL_CONTROL "p15, 0, %0, c1, c1, 3"
/* XREG_CP15_CONTROL bit defines */
#define XREG_CP15_CONTROL_TE_BIT 0x40000000U
#define XREG_CP15_CONTROL_AFE_BIT 0x20000000U
#define XREG_CP15_CONTROL_TRE_BIT 0x10000000U
#define XREG_CP15_CONTROL_NMFI_BIT 0x08000000U
#define XREG_CP15_CONTROL_EE_BIT 0x02000000U
#define XREG_CP15_CONTROL_HA_BIT 0x00020000U
#define XREG_CP15_CONTROL_RR_BIT 0x00004000U
#define XREG_CP15_CONTROL_V_BIT 0x00002000U
#define XREG_CP15_CONTROL_I_BIT 0x00001000U
#define XREG_CP15_CONTROL_Z_BIT 0x00000800U
#define XREG_CP15_CONTROL_SW_BIT 0x00000400U
#define XREG_CP15_CONTROL_B_BIT 0x00000080U
#define XREG_CP15_CONTROL_C_BIT 0x00000004U
#define XREG_CP15_CONTROL_A_BIT 0x00000002U
#define XREG_CP15_CONTROL_M_BIT 0x00000001U
/* C2 Register Defines */
#define XREG_CP15_TTBR0 "p15, 0, %0, c2, c0, 0"
#define XREG_CP15_TTBR1 "p15, 0, %0, c2, c0, 1"
#define XREG_CP15_TTB_CONTROL "p15, 0, %0, c2, c0, 2"
/* C3 Register Defines */
#define XREG_CP15_DOMAIN_ACCESS_CTRL "p15, 0, %0, c3, c0, 0"
/* C4 Register Defines */
/* Not Used */
/* C5 Register Defines */
#define XREG_CP15_DATA_FAULT_STATUS "p15, 0, %0, c5, c0, 0"
#define XREG_CP15_INST_FAULT_STATUS "p15, 0, %0, c5, c0, 1"
#define XREG_CP15_AUX_DATA_FAULT_STATUS "p15, 0, %0, c5, c1, 0"
#define XREG_CP15_AUX_INST_FAULT_STATUS "p15, 0, %0, c5, c1, 1"
/* C6 Register Defines */
#define XREG_CP15_DATA_FAULT_ADDRESS "p15, 0, %0, c6, c0, 0"
#define XREG_CP15_INST_FAULT_ADDRESS "p15, 0, %0, c6, c0, 2"
/* C7 Register Defines */
#define XREG_CP15_NOP "p15, 0, %0, c7, c0, 4"
#define XREG_CP15_INVAL_IC_POU_IS "p15, 0, %0, c7, c1, 0"
#define XREG_CP15_INVAL_BRANCH_ARRAY_IS "p15, 0, %0, c7, c1, 6"
#define XREG_CP15_PHYS_ADDR "p15, 0, %0, c7, c4, 0"
#define XREG_CP15_INVAL_IC_POU "p15, 0, %0, c7, c5, 0"
#define XREG_CP15_INVAL_IC_LINE_MVA_POU "p15, 0, %0, c7, c5, 1"
/* The CP15 register access below has been deprecated in favor of the new
* isb instruction in Cortex A53.
*/
#define XREG_CP15_INST_SYNC_BARRIER "p15, 0, %0, c7, c5, 4"
#define XREG_CP15_INVAL_BRANCH_ARRAY "p15, 0, %0, c7, c5, 6"
#define XREG_CP15_INVAL_DC_LINE_MVA_POC "p15, 0, %0, c7, c6, 1"
#define XREG_CP15_INVAL_DC_LINE_SW "p15, 0, %0, c7, c6, 2"
#define XREG_CP15_VA_TO_PA_CURRENT_0 "p15, 0, %0, c7, c8, 0"
#define XREG_CP15_VA_TO_PA_CURRENT_1 "p15, 0, %0, c7, c8, 1"
#define XREG_CP15_VA_TO_PA_CURRENT_2 "p15, 0, %0, c7, c8, 2"
#define XREG_CP15_VA_TO_PA_CURRENT_3 "p15, 0, %0, c7, c8, 3"
#define XREG_CP15_VA_TO_PA_OTHER_0 "p15, 0, %0, c7, c8, 4"
#define XREG_CP15_VA_TO_PA_OTHER_1 "p15, 0, %0, c7, c8, 5"
#define XREG_CP15_VA_TO_PA_OTHER_2 "p15, 0, %0, c7, c8, 6"
#define XREG_CP15_VA_TO_PA_OTHER_3 "p15, 0, %0, c7, c8, 7"
#define XREG_CP15_CLEAN_DC_LINE_MVA_POC "p15, 0, %0, c7, c10, 1"
#define XREG_CP15_CLEAN_DC_LINE_SW "p15, 0, %0, c7, c10, 2"
/* The next two CP15 register accesses below have been deprecated in favor
* of the new dsb and dmb instructions in Cortex A53.
*/
#define XREG_CP15_DATA_SYNC_BARRIER "p15, 0, %0, c7, c10, 4"
#define XREG_CP15_DATA_MEMORY_BARRIER "p15, 0, %0, c7, c10, 5"
#define XREG_CP15_CLEAN_DC_LINE_MVA_POU "p15, 0, %0, c7, c11, 1"
#define XREG_CP15_NOP2 "p15, 0, %0, c7, c13, 1"
#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC "p15, 0, %0, c7, c14, 1"
#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW "p15, 0, %0, c7, c14, 2"
/* C8 Register Defines */
#define XREG_CP15_INVAL_TLB_IS "p15, 0, %0, c8, c3, 0"
#define XREG_CP15_INVAL_TLB_MVA_IS "p15, 0, %0, c8, c3, 1"
#define XREG_CP15_INVAL_TLB_ASID_IS "p15, 0, %0, c8, c3, 2"
#define XREG_CP15_INVAL_TLB_MVA_ASID_IS "p15, 0, %0, c8, c3, 3"
#define XREG_CP15_INVAL_ITLB_UNLOCKED "p15, 0, %0, c8, c5, 0"
#define XREG_CP15_INVAL_ITLB_MVA "p15, 0, %0, c8, c5, 1"
#define XREG_CP15_INVAL_ITLB_ASID "p15, 0, %0, c8, c5, 2"
#define XREG_CP15_INVAL_DTLB_UNLOCKED "p15, 0, %0, c8, c6, 0"
#define XREG_CP15_INVAL_DTLB_MVA "p15, 0, %0, c8, c6, 1"
#define XREG_CP15_INVAL_DTLB_ASID "p15, 0, %0, c8, c6, 2"
#define XREG_CP15_INVAL_UTLB_UNLOCKED "p15, 0, %0, c8, c7, 0"
#define XREG_CP15_INVAL_UTLB_MVA "p15, 0, %0, c8, c7, 1"
#define XREG_CP15_INVAL_UTLB_ASID "p15, 0, %0, c8, c7, 2"
#define XREG_CP15_INVAL_UTLB_MVA_ASID "p15, 0, %0, c8, c7, 3"
/* C9 Register Defines */
#define XREG_CP15_PERF_MONITOR_CTRL "p15, 0, %0, c9, c12, 0"
#define XREG_CP15_COUNT_ENABLE_SET "p15, 0, %0, c9, c12, 1"
#define XREG_CP15_COUNT_ENABLE_CLR "p15, 0, %0, c9, c12, 2"
#define XREG_CP15_V_FLAG_STATUS "p15, 0, %0, c9, c12, 3"
#define XREG_CP15_SW_INC "p15, 0, %0, c9, c12, 4"
#define XREG_CP15_EVENT_CNTR_SEL "p15, 0, %0, c9, c12, 5"
#define XREG_CP15_PERF_CYCLE_COUNTER "p15, 0, %0, c9, c13, 0"
#define XREG_CP15_EVENT_TYPE_SEL "p15, 0, %0, c9, c13, 1"
#define XREG_CP15_PERF_MONITOR_COUNT "p15, 0, %0, c9, c13, 2"
#define XREG_CP15_USER_ENABLE "p15, 0, %0, c9, c14, 0"
#define XREG_CP15_INTR_ENABLE_SET "p15, 0, %0, c9, c14, 1"
#define XREG_CP15_INTR_ENABLE_CLR "p15, 0, %0, c9, c14, 2"
/* C10 Register Defines */
#define XREG_CP15_TLB_LOCKDWN "p15, 0, %0, c10, c0, 0"
#define XREG_CP15_PRI_MEM_REMAP "p15, 0, %0, c10, c2, 0"
#define XREG_CP15_NORM_MEM_REMAP "p15, 0, %0, c10, c2, 1"
/* C11 Register Defines */
/* Not used */
/* C12 Register Defines */
#define XREG_CP15_VEC_BASE_ADDR "p15, 0, %0, c12, c0, 0"
#define XREG_CP15_MONITOR_VEC_BASE_ADDR "p15, 0, %0, c12, c0, 1"
#define XREG_CP15_INTERRUPT_STATUS "p15, 0, %0, c12, c1, 0"
#define XREG_CP15_VIRTUALIZATION_INTR "p15, 0, %0, c12, c1, 1"
/* C13 Register Defines */
#define XREG_CP15_CONTEXT_ID "p15, 0, %0, c13, c0, 1"
#define USER_RW_THREAD_PID "p15, 0, %0, c13, c0, 2"
#define USER_RO_THREAD_PID "p15, 0, %0, c13, c0, 3"
#define USER_PRIV_THREAD_PID "p15, 0, %0, c13, c0, 4"
/* C14 Register Defines */
/* not used */
/* C15 Register Defines */
#define XREG_CP15_POWER_CTRL "p15, 0, %0, c15, c0, 0"
#define XREG_CP15_CONFIG_BASE_ADDR "p15, 4, %0, c15, c0, 0"
#define XREG_CP15_READ_TLB_ENTRY "p15, 5, %0, c15, c4, 2"
#define XREG_CP15_WRITE_TLB_ENTRY "p15, 5, %0, c15, c4, 4"
#define XREG_CP15_MAIN_TLB_VA "p15, 5, %0, c15, c5, 2"
#define XREG_CP15_MAIN_TLB_PA "p15, 5, %0, c15, c6, 2"
#define XREG_CP15_MAIN_TLB_ATTR "p15, 5, %0, c15, c7, 2"
/* MPE register definitions */
#define XREG_FPSID c0
#define XREG_FPSCR c1
#define XREG_MVFR1 c6
#define XREG_MVFR0 c7
#define XREG_FPEXC c8
#define XREG_FPINST c9
#define XREG_FPINST2 c10
/* FPSID bits */
#define XREG_FPSID_IMPLEMENTER_BIT (24)
#define XREG_FPSID_IMPLEMENTER_MASK (0xFF << FPSID_IMPLEMENTER_BIT)
#define XREG_FPSID_SOFTWARE (1<<23)
#define XREG_FPSID_ARCH_BIT (16)
#define XREG_FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
#define XREG_FPSID_PART_BIT (8)
#define XREG_FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
#define XREG_FPSID_VARIANT_BIT (4)
#define XREG_FPSID_VARIANT_MASK (0xF << FPSID_VARIANT_BIT)
#define XREG_FPSID_REV_BIT (0)
#define XREG_FPSID_REV_MASK (0xF << FPSID_REV_BIT)
/* FPSCR bits */
#define XREG_FPSCR_N_BIT (1 << 31)
#define XREG_FPSCR_Z_BIT (1 << 30)
#define XREG_FPSCR_C_BIT (1 << 29)
#define XREG_FPSCR_V_BIT (1 << 28)
#define XREG_FPSCR_QC (1 << 27)
#define XREG_FPSCR_AHP (1 << 26)
#define XREG_FPSCR_DEFAULT_NAN (1 << 25)
#define XREG_FPSCR_FLUSHTOZERO (1 << 24)
#define XREG_FPSCR_ROUND_NEAREST (0 << 22)
#define XREG_FPSCR_ROUND_PLUSINF (1 << 22)
#define XREG_FPSCR_ROUND_MINUSINF (2 << 22)
#define XREG_FPSCR_ROUND_TOZERO (3 << 22)
#define XREG_FPSCR_RMODE_BIT (22)
#define XREG_FPSCR_RMODE_MASK (3 << FPSCR_RMODE_BIT)
#define XREG_FPSCR_STRIDE_BIT (20)
#define XREG_FPSCR_STRIDE_MASK (3 << FPSCR_STRIDE_BIT)
#define XREG_FPSCR_LENGTH_BIT (16)
#define XREG_FPSCR_LENGTH_MASK (7 << FPSCR_LENGTH_BIT)
#define XREG_FPSCR_IDC (1 << 7)
#define XREG_FPSCR_IXC (1 << 4)
#define XREG_FPSCR_UFC (1 << 3)
#define XREG_FPSCR_OFC (1 << 2)
#define XREG_FPSCR_DZC (1 << 1)
#define XREG_FPSCR_IOC (1 << 0)
/* MVFR0 bits */
#define XREG_MVFR0_RMODE_BIT (28)
#define XREG_MVFR0_RMODE_MASK (0xF << XREG_MVFR0_RMODE_BIT)
#define XREG_MVFR0_SHORT_VEC_BIT (24)
#define XREG_MVFR0_SHORT_VEC_MASK (0xF << XREG_MVFR0_SHORT_VEC_BIT)
#define XREG_MVFR0_SQRT_BIT (20)
#define XREG_MVFR0_SQRT_MASK (0xF << XREG_MVFR0_SQRT_BIT)
#define XREG_MVFR0_DIVIDE_BIT (16)
#define XREG_MVFR0_DIVIDE_MASK (0xF << XREG_MVFR0_DIVIDE_BIT)
#define XREG_MVFR0_EXEC_TRAP_BIT (12)
#define XREG_MVFR0_EXEC_TRAP_MASK (0xF << XREG_MVFR0_EXEC_TRAP_BIT)
#define XREG_MVFR0_DP_BIT (8)
#define XREG_MVFR0_DP_MASK (0xF << XREG_MVFR0_DP_BIT)
#define XREG_MVFR0_SP_BIT (4)
#define XREG_MVFR0_SP_MASK (0xF << XREG_MVFR0_SP_BIT)
#define XREG_MVFR0_A_SIMD_BIT (0)
#define XREG_MVFR0_A_SIMD_MASK (0xF << MVFR0_A_SIMD_BIT)
/* FPEXC bits */
#define XREG_FPEXC_EX (1 << 31)
#define XREG_FPEXC_EN (1 << 30)
#define XREG_FPEXC_DEX (1 << 29)
#define XREG_CONTROL_DCACHE_BIT (0X00000001U<<2U)
#define XREG_CONTROL_ICACHE_BIT (0X00000001U<<12U)
/**
*@endcond
*/
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XREG_CORTEXA53_H */

View File

@@ -1,75 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.h
*
* @addtogroup a53_64_cache_apis Cortex A53 64bit Processor Cache Functions
*
* Cache functions provide access to cache related operations such as flush
* and invalidate for instruction and data caches. It gives option to perform
* the cache operations on a single cacheline, a range of memory and an entire
* cache.
*
* @{
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.00 pkp 05/29/14 First release
* </pre>
*
******************************************************************************/
#ifndef XIL_CACHE_H
#define XIL_CACHE_H
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
*@cond nocomments
*/
/************************** Constant Definitions *****************************/
#define L1_DATA_PREFETCH_CONTROL_MASK 0xE000
#define L1_DATA_PREFETCH_CONTROL_SHIFT 13
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions *********************/
#define Xil_DCacheFlushRange Xil_DCacheInvalidateRange
/************************** Function Prototypes ******************************/
void Xil_DCacheEnable(void);
void Xil_DCacheDisable(void);
void Xil_DCacheInvalidate(void);
void Xil_DCacheInvalidateRange(INTPTR adr, INTPTR len);
void Xil_DCacheInvalidateLine(INTPTR adr);
void Xil_DCacheFlush(void);
void Xil_DCacheFlushLine(INTPTR adr);
void Xil_ICacheEnable(void);
void Xil_ICacheDisable(void);
void Xil_ICacheInvalidate(void);
void Xil_ICacheInvalidateRange(INTPTR adr, INTPTR len);
void Xil_ICacheInvalidateLine(INTPTR adr);
void Xil_ConfigureL1Prefetch(u8 num);
#ifdef __cplusplus
}
#endif
#endif
/**
* @} End of "addtogroup a53_64_cache_apis".
*/

View File

@@ -1,408 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_exception.h
*
* This header file contains ARM Cortex A53,A9,R5 specific exception related APIs.
* For exception related functions that can be used across all Xilinx supported
* processors, please use xil_exception.h.
*
* @addtogroup arm_exception_apis ARM Processor Exception Handling
* @{
* ARM processors specific exception related APIs for cortex A53,A9 and R5 can
* utilized for enabling/disabling IRQ, registering/removing handler for
* exceptions or initializing exception vector table with null handler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Consolidated file for a53,a9 and r5 processors
* 6.7 mna 26/04/18 Add API Xil_GetExceptionRegisterHandler.
* 6.7 asa 18/05/18 Update signature of API Xil_GetExceptionRegisterHandler.
* 7.0 mus 01/03/19 Tweak Xil_ExceptionEnableMask and
* Xil_ExceptionDisableMask macros to support legacy
* examples for Cortexa72 EL3 exception level.
* 7.3 mus 04/15/20 Added Xil_EnableNestedInterrupts and
* Xil_DisableNestedInterrupts macros for ARMv8.
* For Cortexa72, these macro's would not be supported
* at EL3, as Cortexa72 is using GIC-500(GICv3), which
* triggeres only FIQ at EL3. Fix for CR#1062506
* 7.6 mus 09/17/21 Updated flag checking to fix warning reported with
* -Wundef compiler option CR#1110261
* 7.7 mus 01/31/22 Few of the #defines in xil_exception.h in are treated
* in different way based on "versal" flag. In existing
* flow, this flag is defined only in xparameters.h and
* BSP compiler flags, it is not defined in application
* compiler flags. So, including xil_exception.h in
* application source file, without including
* xparameters.h results in incorrect behavior.
* Including xparameters.h in xil_exception.h to avoid
* such issues. It fixes CR#1120498.
* 7.7 sk 03/02/22 Define XExc_VectorTableEntry structure to fix
* misra_c_2012_rule_5_6 violation.
* 7.7 sk 03/02/22 Add XExc_VectorTable as extern to fix misra_c_2012_
* rule_8_4 violation.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
#define XIL_EXCEPTION_H /* by using protection macros */
/***************************** Include Files ********************************/
#include "xil_types.h"
#include "xpseudo_asm.h"
#include "bspconfig.h"
#include "xparameters.h"
#ifdef __cplusplus
extern "C" {
#endif
/************************** Constant Definitions ****************************/
#define XIL_EXCEPTION_FIQ XREG_CPSR_FIQ_ENABLE
#define XIL_EXCEPTION_IRQ XREG_CPSR_IRQ_ENABLE
#define XIL_EXCEPTION_ALL (XREG_CPSR_FIQ_ENABLE | XREG_CPSR_IRQ_ENABLE)
#define XIL_EXCEPTION_ID_FIRST 0U
#if defined (__aarch64__)
#define XIL_EXCEPTION_ID_SYNC_INT 1U
#define XIL_EXCEPTION_ID_IRQ_INT 2U
#define XIL_EXCEPTION_ID_FIQ_INT 3U
#define XIL_EXCEPTION_ID_SERROR_ABORT_INT 4U
#define XIL_EXCEPTION_ID_LAST 5U
#else
#define XIL_EXCEPTION_ID_RESET 0U
#define XIL_EXCEPTION_ID_UNDEFINED_INT 1U
#define XIL_EXCEPTION_ID_SWI_INT 2U
#define XIL_EXCEPTION_ID_PREFETCH_ABORT_INT 3U
#define XIL_EXCEPTION_ID_DATA_ABORT_INT 4U
#define XIL_EXCEPTION_ID_IRQ_INT 5U
#define XIL_EXCEPTION_ID_FIQ_INT 6U
#define XIL_EXCEPTION_ID_LAST 6U
#endif
/*
* XIL_EXCEPTION_ID_INT is defined for all Xilinx processors.
*/
#if defined (versal) && !defined(ARMR5) && EL3
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_FIQ_INT
#else
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_IRQ_INT
#endif
/**************************** Type Definitions ******************************/
/**
* This typedef is the exception handler function.
*/
typedef void (*Xil_ExceptionHandler)(void *data);
typedef void (*Xil_InterruptHandler)(void *data);
typedef struct {
Xil_ExceptionHandler Handler;
void *Data;
} XExc_VectorTableEntry;
extern XExc_VectorTableEntry XExc_VectorTable[];
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions ********************/
/****************************************************************************/
/**
* @brief Enable Exceptions.
*
* @param Mask: Value for enabling the exceptions.
*
* @return None.
*
* @note If bit is 0, exception is enabled.
* C-Style signature: void Xil_ExceptionEnableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* Cortexa72 processor in versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, tweaking this macro to always enable FIQ
* ignoring argument passed by user.
*/
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionEnableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) & (~((Mask) & XIL_EXCEPTION_ALL))); \
}
#endif
/****************************************************************************/
/**
* @brief Enable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_FIQ)
#else
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_IRQ)
#endif
/****************************************************************************/
/**
* @brief Disable Exceptions.
*
* @param Mask: Value for disabling the exceptions.
*
* @return None.
*
* @note If bit is 1, exception is disabled.
* C-Style signature: Xil_ExceptionDisableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* Cortexa72 processor in versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, tweaking this macro to always disable FIQ
* ignoring argument passed by user.
*/
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionDisableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) | ((Mask) & XIL_EXCEPTION_ALL)); \
}
#endif
/****************************************************************************/
/**
* Disable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#define Xil_ExceptionDisable() \
Xil_ExceptionDisableMask(XIL_EXCEPTION_IRQ)
#if ( defined (PLATFORM_ZYNQMP) && defined (EL3) && (EL3==1) )
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF.This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.. However,it is not defined for Versal Cortex-A72 BSP
* configured to run at EL3. Reason is, Cortex-A72 is coupled
* with GIC-500(GICv3 specifications) and it triggers only FIQ at EL3.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL3"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL3"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
* nesting of interrupts have been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL3, X1"); \
__asm__ __volatile__ ("msr SPSR_EL3, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (defined (EL1_NONSECURE) && (EL1_NONSECURE==1))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF.This
* macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
* BSP configured to run at EL1 NON SECURE
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL1"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL1"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
* BSP configured to run at EL1 NON SECURE
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
* nesting of interrupts have been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL1, X1"); \
__asm__ __volatile__ ("msr SPSR_EL1, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (!defined (__aarch64__) && !defined (ARMA53_32))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I and F bits in CPSR. This
* API is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I and F
* are 1). To allow nesting of interrupts, this macro should be
* used. It clears the I and F bits by changing the ARM mode to
* system mode. Once these bits are cleared and provided the
* preemption of interrupt conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I and
* F bits, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("mrs lr, spsr"); \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x1F"); \
__asm__ __volatile__ ("stmfd sp!, {lr}");
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I and F bits. This API
* is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ/FIQ mode and
* hence sets back the I and F bits.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x92"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr spsr_cxsf, lr"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
#endif
/************************** Variable Definitions ****************************/
/************************** Function Prototypes *****************************/
extern void Xil_ExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler Handler,
void *Data);
extern void Xil_ExceptionRemoveHandler(u32 Exception_id);
extern void Xil_GetExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler *Handler, void **Data);
extern void Xil_ExceptionInit(void);
#if defined (__aarch64__)
void Xil_SyncAbortHandler(void *CallBackRef);
void Xil_SErrorAbortHandler(void *CallBackRef);
#else
extern void Xil_DataAbortHandler(void *CallBackRef);
extern void Xil_PrefetchAbortHandler(void *CallBackRef);
extern void Xil_UndefinedExceptionHandler(void *CallBackRef);
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XIL_EXCEPTION_H */
/**
* @} End of "addtogroup arm_exception_apis".
*/
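As the notes above describe, a handler that wants to be preempted must first clear its own interrupt source, then enable nesting, and disable nesting again before returning. A minimal sketch of that flow follows; MyDevice_ClearInterrupt() and MyDevice_ProcessData() are hypothetical placeholders for the application's own device code, and the handler would be hooked up through the BSP's interrupt controller driver.

#include "xil_exception.h"

/* Hypothetical device helpers, assumed to be provided by the application. */
extern void MyDevice_ClearInterrupt(void *InstancePtr);
extern void MyDevice_ProcessData(void *InstancePtr);

static void MyDevice_IrqHandler(void *CallBackRef)
{
    /* Acknowledge the source first; re-enabling IRQs before this point
     * could retrigger the same interrupt and exhaust the stack. */
    MyDevice_ClearInterrupt(CallBackRef);

    /* Save ELR/SPSR (or LR/SPSR) and unmask interrupts for nesting. */
    Xil_EnableNestedInterrupts();

    /* Longer, preemptible work runs here. */
    MyDevice_ProcessData(CallBackRef);

    /* Restore the saved state and mask interrupts again before returning. */
    Xil_DisableNestedInterrupts();
}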


@@ -1,56 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm.h
*
* @addtogroup a53_64_specific Cortex A53 64bit Processor Specific Include Files
*
* The xpseudo_asm.h includes xreg_cortexa53.h and xpseudo_asm_gcc.h.
* The xreg_cortexa53.h file contains definitions for inline assembler code.
* It provides inline definitions for Cortex A53 GPRs, SPRs and floating point
* registers.
*
* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
* assembler instructions, available as macros. These can be very useful for
* tasks such as setting or getting special purpose registers, synchronization,
* or cache manipulation etc. These inline assembler instructions can be used
* from drivers and user applications written in C.
*
* @{
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.00 pkp 05/29/14 First release
* </pre>
*
******************************************************************************/
#ifndef XPSEUDO_ASM_H
#define XPSEUDO_ASM_H
#ifdef __cplusplus
extern "C" {
#endif
#include "xreg_cortexa53.h"
#ifdef __clang__
#include "xpseudo_asm_armclang.h"
#else
#include "xpseudo_asm_gcc.h"
#endif
#ifdef __cplusplus
}
#endif
#endif /* XPSEUDO_ASM_H */
/**
* @} End of "addtogroup a53_64_specific".
*/


@@ -1,163 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xreg_cortexa53.h
*
* This header file contains definitions for using inline assembler code. It is
* written specifically for the GNU compiler.
*
* All of the ARM Cortex A53 GPRs, SPRs, and Debug Registers are defined along
* with the positions of the bits within the registers.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.00 pkp 05/29/14 First release
* </pre>
*
******************************************************************************/
#ifndef XREG_CORTEXA53_H
#define XREG_CORTEXA53_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/**
*@cond nocomments
*/
/* GPRs */
#define XREG_GPR0 x0
#define XREG_GPR1 x1
#define XREG_GPR2 x2
#define XREG_GPR3 x3
#define XREG_GPR4 x4
#define XREG_GPR5 x5
#define XREG_GPR6 x6
#define XREG_GPR7 x7
#define XREG_GPR8 x8
#define XREG_GPR9 x9
#define XREG_GPR10 x10
#define XREG_GPR11 x11
#define XREG_GPR12 x12
#define XREG_GPR13 x13
#define XREG_GPR14 x14
#define XREG_GPR15 x15
#define XREG_GPR16 x16
#define XREG_GPR17 x17
#define XREG_GPR18 x18
#define XREG_GPR19 x19
#define XREG_GPR20 x20
#define XREG_GPR21 x21
#define XREG_GPR22 x22
#define XREG_GPR23 x23
#define XREG_GPR24 x24
#define XREG_GPR25 x25
#define XREG_GPR26 x26
#define XREG_GPR27 x27
#define XREG_GPR28 x28
#define XREG_GPR29 x29
#define XREG_GPR30 x30
#define XREG_CPSR cpsr
/* Current Processor Status Register (CPSR) Bits */
#define XREG_CPSR_MODE_BITS 0x1FU
#define XREG_CPSR_EL3h_MODE 0xDU
#define XREG_CPSR_EL3t_MODE 0xCU
#define XREG_CPSR_EL2h_MODE 0x9U
#define XREG_CPSR_EL2t_MODE 0x8U
#define XREG_CPSR_EL1h_MODE 0x5U
#define XREG_CPSR_EL1t_MODE 0x4U
#define XREG_CPSR_EL0t_MODE 0x0U
#define XREG_CPSR_IRQ_ENABLE 0x80U
#define XREG_CPSR_FIQ_ENABLE 0x40U
#define XREG_CPSR_N_BIT 0x80000000U
#define XREG_CPSR_Z_BIT 0x40000000U
#define XREG_CPSR_C_BIT 0x20000000U
#define XREG_CPSR_V_BIT 0x10000000U
/* FPSID bits */
#define XREG_FPSID_IMPLEMENTER_BIT (24U)
#define XREG_FPSID_IMPLEMENTER_MASK	(0x000000FFU << XREG_FPSID_IMPLEMENTER_BIT)
#define XREG_FPSID_SOFTWARE		(0X00000001U<<23U)
#define XREG_FPSID_ARCH_BIT		(16U)
#define XREG_FPSID_ARCH_MASK		(0x0000000FU << XREG_FPSID_ARCH_BIT)
#define XREG_FPSID_PART_BIT		(8U)
#define XREG_FPSID_PART_MASK		(0x000000FFU << XREG_FPSID_PART_BIT)
#define XREG_FPSID_VARIANT_BIT		(4U)
#define XREG_FPSID_VARIANT_MASK		(0x0000000FU << XREG_FPSID_VARIANT_BIT)
#define XREG_FPSID_REV_BIT		(0U)
#define XREG_FPSID_REV_MASK		(0x0000000FU << XREG_FPSID_REV_BIT)
/* FPSCR bits */
#define XREG_FPSCR_N_BIT (0X00000001U << 31U)
#define XREG_FPSCR_Z_BIT (0X00000001U << 30U)
#define XREG_FPSCR_C_BIT (0X00000001U << 29U)
#define XREG_FPSCR_V_BIT (0X00000001U << 28U)
#define XREG_FPSCR_QC (0X00000001U << 27U)
#define XREG_FPSCR_AHP (0X00000001U << 26U)
#define XREG_FPSCR_DEFAULT_NAN (0X00000001U << 25U)
#define XREG_FPSCR_FLUSHTOZERO (0X00000001U << 24U)
#define XREG_FPSCR_ROUND_NEAREST (0X00000000U << 22U)
#define XREG_FPSCR_ROUND_PLUSINF (0X00000001U << 22U)
#define XREG_FPSCR_ROUND_MINUSINF (0X00000002U << 22U)
#define XREG_FPSCR_ROUND_TOZERO (0X00000003U << 22U)
#define XREG_FPSCR_RMODE_BIT (22U)
#define XREG_FPSCR_RMODE_MASK		(0X00000003U << XREG_FPSCR_RMODE_BIT)
#define XREG_FPSCR_STRIDE_BIT		(20U)
#define XREG_FPSCR_STRIDE_MASK		(0X00000003U << XREG_FPSCR_STRIDE_BIT)
#define XREG_FPSCR_LENGTH_BIT		(16U)
#define XREG_FPSCR_LENGTH_MASK		(0X00000007U << XREG_FPSCR_LENGTH_BIT)
#define XREG_FPSCR_IDC (0X00000001U << 7U)
#define XREG_FPSCR_IXC (0X00000001U << 4U)
#define XREG_FPSCR_UFC (0X00000001U << 3U)
#define XREG_FPSCR_OFC (0X00000001U << 2U)
#define XREG_FPSCR_DZC (0X00000001U << 1U)
#define XREG_FPSCR_IOC (0X00000001U << 0U)
/* MVFR0 bits */
#define XREG_MVFR0_RMODE_BIT (28U)
#define XREG_MVFR0_RMODE_MASK (0x0000000FU << XREG_MVFR0_RMODE_BIT)
#define XREG_MVFR0_SHORT_VEC_BIT (24U)
#define XREG_MVFR0_SHORT_VEC_MASK (0x0000000FU << XREG_MVFR0_SHORT_VEC_BIT)
#define XREG_MVFR0_SQRT_BIT (20U)
#define XREG_MVFR0_SQRT_MASK (0x0000000FU << XREG_MVFR0_SQRT_BIT)
#define XREG_MVFR0_DIVIDE_BIT (16U)
#define XREG_MVFR0_DIVIDE_MASK (0x0000000FU << XREG_MVFR0_DIVIDE_BIT)
#define XREG_MVFR0_EXEC_TRAP_BIT (0X00000012U)
#define XREG_MVFR0_EXEC_TRAP_MASK (0X0000000FU << XREG_MVFR0_EXEC_TRAP_BIT)
#define XREG_MVFR0_DP_BIT (8U)
#define XREG_MVFR0_DP_MASK (0x0000000FU << XREG_MVFR0_DP_BIT)
#define XREG_MVFR0_SP_BIT (4U)
#define XREG_MVFR0_SP_MASK (0x0000000FU << XREG_MVFR0_SP_BIT)
#define XREG_MVFR0_A_SIMD_BIT (0U)
#define XREG_MVFR0_A_SIMD_MASK		(0x0000000FU << XREG_MVFR0_A_SIMD_BIT)
/* FPEXC bits */
#define XREG_FPEXC_EX (0X00000001U << 31U)
#define XREG_FPEXC_EN (0X00000001U << 30U)
#define XREG_FPEXC_DEX (0X00000001U << 29U)
#define XREG_CONTROL_DCACHE_BIT (0X00000001U<<2U)
#define XREG_CONTROL_ICACHE_BIT (0X00000001U<<12U)
/**
*@endcond
*/
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XREG_CORTEXA53_H */


@@ -1,105 +0,0 @@
/******************************************************************************
* Copyright (c) 2010 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.h
*
* @addtogroup a9_cache_apis Cortex A9 Processor Cache Functions
*
* Cache functions provide access to cache related operations such as flush
* and invalidate for instruction and data caches. They give the option to perform
* the cache operations on a single cacheline, a range of memory and an entire
* cache.
*
* @{
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 1.00a ecm 01/29/10 First release
* 3.04a sdm 01/02/12 Remove redundant dsb/dmb instructions in cache maintenance
* APIs.
* 6.8 aru 09/06/18 Removed compilation warnings for ARMCC toolchain.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_CACHE_H
#define XIL_CACHE_H
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __GNUC__
#define asm_cp15_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param));
#define asm_cp15_clean_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param));
#define asm_cp15_inval_ic_line_mva_pou(param) __asm__ __volatile__("mcr " \
XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param));
#define asm_cp15_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
XREG_CP15_INVAL_DC_LINE_SW :: "r" (param));
#define asm_cp15_clean_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param));
#elif defined (__ICCARM__)
#define asm_cp15_inval_dc_line_mva_poc(param) __asm volatile ("mcr " \
XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param));
#define asm_cp15_clean_inval_dc_line_mva_poc(param) __asm volatile ("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param));
#define asm_cp15_inval_ic_line_mva_pou(param) __asm volatile ("mcr " \
XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param));
#define asm_cp15_inval_dc_line_sw(param) __asm volatile ("mcr " \
XREG_CP15_INVAL_DC_LINE_SW :: "r" (param));
#define asm_cp15_clean_inval_dc_line_sw(param) __asm volatile ("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param));
#endif
/**
*@endcond
*/
void Xil_DCacheEnable(void);
void Xil_DCacheDisable(void);
void Xil_DCacheInvalidate(void);
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
void Xil_DCacheFlush(void);
void Xil_DCacheFlushRange(INTPTR adr, u32 len);
void Xil_ICacheEnable(void);
void Xil_ICacheDisable(void);
void Xil_ICacheInvalidate(void);
void Xil_ICacheInvalidateRange(INTPTR adr, u32 len);
#ifdef __cplusplus
}
#endif
#endif
/**
* @} End of "addtogroup a9_cache_apis".
*/
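The range-based functions above are typically paired around a non-coherent DMA transfer: flush the CPU's writes before the device reads a buffer, and invalidate before the CPU reads what the device wrote. A minimal sketch under that assumption; dma_start_transfer() and dma_wait_done() are hypothetical stand-ins for the actual DMA driver and the buffer size is arbitrary.

#include "xil_cache.h"
#include "xil_types.h"

#define BUF_LEN 1024U

static u8 TxBuf[BUF_LEN];
static u8 RxBuf[BUF_LEN];

/* Hypothetical DMA driver entry points. */
extern void dma_start_transfer(void *tx, void *rx, u32 len);
extern void dma_wait_done(void);

void dma_buffer_example(void)
{
    /* Push the CPU's pending writes out to memory so the DMA engine sees them. */
    Xil_DCacheFlushRange((INTPTR)TxBuf, BUF_LEN);

    dma_start_transfer(TxBuf, RxBuf, BUF_LEN);
    dma_wait_done();

    /* Drop any stale cache lines so reads return the data the DMA wrote. */
    Xil_DCacheInvalidateRange((INTPTR)RxBuf, BUF_LEN);
}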


@@ -1,408 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_exception.h
*
* This header file contains ARM Cortex A53, A9, and R5 specific exception related APIs.
* For exception related functions that can be used across all Xilinx supported
* processors, please use xil_exception.h.
*
* @addtogroup arm_exception_apis ARM Processor Exception Handling
* @{
* ARM processor-specific exception related APIs for Cortex A53, A9 and R5 can be
* utilized for enabling/disabling IRQ, registering/removing handlers for
* exceptions, or initializing the exception vector table with a null handler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Consolidated file for a53,a9 and r5 processors
* 6.7 mna 26/04/18 Add API Xil_GetExceptionRegisterHandler.
* 6.7 asa 18/05/18 Update signature of API Xil_GetExceptionRegisterHandler.
* 7.0 mus 01/03/19 Tweak Xil_ExceptionEnableMask and
* Xil_ExceptionDisableMask macros to support legacy
* examples for Cortexa72 EL3 exception level.
* 7.3 mus 04/15/20 Added Xil_EnableNestedInterrupts and
* Xil_DisableNestedInterrupts macros for ARMv8.
*                       For Cortexa72, these macros would not be supported
*                       at EL3, as Cortexa72 is using GIC-500(GICv3), which
*                       triggers only FIQ at EL3. Fix for CR#1062506
* 7.6 mus 09/17/21 Updated flag checking to fix warning reported with
* -Wundef compiler option CR#1110261
* 7.7  mus      01/31/22 A few of the #defines in xil_exception.h are treated
*                       in a different way based on the "versal" flag. In the
*                       existing flow, this flag is defined only in xparameters.h
*                       and the BSP compiler flags; it is not defined in the
*                       application compiler flags. So, including xil_exception.h
*                       in an application source file without including
*                       xparameters.h results in incorrect behavior.
*                       xparameters.h is now included in xil_exception.h to avoid
*                       such issues. It fixes CR#1120498.
* 7.7 sk 03/02/22 Define XExc_VectorTableEntry structure to fix
* misra_c_2012_rule_5_6 violation.
* 7.7 sk 03/02/22 Add XExc_VectorTable as extern to fix misra_c_2012_
* rule_8_4 violation.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
#define XIL_EXCEPTION_H /* by using protection macros */
/***************************** Include Files ********************************/
#include "xil_types.h"
#include "xpseudo_asm.h"
#include "bspconfig.h"
#include "xparameters.h"
#ifdef __cplusplus
extern "C" {
#endif
/************************** Constant Definitions ****************************/
#define XIL_EXCEPTION_FIQ XREG_CPSR_FIQ_ENABLE
#define XIL_EXCEPTION_IRQ XREG_CPSR_IRQ_ENABLE
#define XIL_EXCEPTION_ALL (XREG_CPSR_FIQ_ENABLE | XREG_CPSR_IRQ_ENABLE)
#define XIL_EXCEPTION_ID_FIRST 0U
#if defined (__aarch64__)
#define XIL_EXCEPTION_ID_SYNC_INT 1U
#define XIL_EXCEPTION_ID_IRQ_INT 2U
#define XIL_EXCEPTION_ID_FIQ_INT 3U
#define XIL_EXCEPTION_ID_SERROR_ABORT_INT 4U
#define XIL_EXCEPTION_ID_LAST 5U
#else
#define XIL_EXCEPTION_ID_RESET 0U
#define XIL_EXCEPTION_ID_UNDEFINED_INT 1U
#define XIL_EXCEPTION_ID_SWI_INT 2U
#define XIL_EXCEPTION_ID_PREFETCH_ABORT_INT 3U
#define XIL_EXCEPTION_ID_DATA_ABORT_INT 4U
#define XIL_EXCEPTION_ID_IRQ_INT 5U
#define XIL_EXCEPTION_ID_FIQ_INT 6U
#define XIL_EXCEPTION_ID_LAST 6U
#endif
/*
* XIL_EXCEPTION_ID_INT is defined for all Xilinx processors.
*/
#if defined (versal) && !defined(ARMR5) && EL3
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_FIQ_INT
#else
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_IRQ_INT
#endif
/**************************** Type Definitions ******************************/
/**
* This typedef is the exception handler function.
*/
typedef void (*Xil_ExceptionHandler)(void *data);
typedef void (*Xil_InterruptHandler)(void *data);
typedef struct {
Xil_ExceptionHandler Handler;
void *Data;
} XExc_VectorTableEntry;
extern XExc_VectorTableEntry XExc_VectorTable[];
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions ********************/
/****************************************************************************/
/**
* @brief Enable Exceptions.
*
* @param Mask: Value for enabling the exceptions.
*
* @return None.
*
* @note If bit is 0, exception is enabled.
* C-Style signature: void Xil_ExceptionEnableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, this macro is tweaked to always enable FIQ,
* ignoring the argument passed by the user.
*/
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionEnableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) & (~((Mask) & XIL_EXCEPTION_ALL))); \
}
#endif
/****************************************************************************/
/**
* @brief Enable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_FIQ)
#else
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_IRQ)
#endif
/****************************************************************************/
/**
* @brief Disable Exceptions.
*
* @param Mask: Value for disabling the exceptions.
*
* @return None.
*
* @note If bit is 1, exception is disabled.
* C-Style signature: Xil_ExceptionDisableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, this macro is tweaked to always disable FIQ,
* ignoring the argument passed by the user.
*/
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionDisableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) | ((Mask) & XIL_EXCEPTION_ALL)); \
}
#endif
/****************************************************************************/
/**
* Disable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#define Xil_ExceptionDisable() \
Xil_ExceptionDisableMask(XIL_EXCEPTION_IRQ)
#if ( defined (PLATFORM_ZYNQMP) && defined (EL3) && (EL3==1) )
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF. This
*        macro is defined for the Cortex-A53 64 bit mode BSP configured to run
*        at EL3. However, it is not defined for the Versal Cortex-A72 BSP
*        configured to run at EL3, because the Cortex-A72 is coupled
*        with GIC-500 (GICv3 specifications) and triggers only FIQ at EL3.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL3"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL3"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL3, X1"); \
__asm__ __volatile__ ("msr SPSR_EL3, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (defined (EL1_NONSECURE) && (EL1_NONSECURE==1))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF. This
*        macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
*        BSPs configured to run at EL1 NON SECURE.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL1"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL1"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
*        macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
*        BSPs configured to run at EL1 NON SECURE.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL1, X1"); \
__asm__ __volatile__ ("msr SPSR_EL1, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (!defined (__aarch64__) && !defined (ARMA53_32))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I and F bits in CPSR. This
* API is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I and F
* are 1). To allow nesting of interrupts, this macro should be
* used. It clears the I and F bits by changing the ARM mode to
* system mode. Once these bits are cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I and
* F bits, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("mrs lr, spsr"); \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x1F"); \
__asm__ __volatile__ ("stmfd sp!, {lr}");
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I and F bits. This API
* is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ/FIQ mode and
* hence sets back the I and F bits.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x92"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr spsr_cxsf, lr"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
#endif
/************************** Variable Definitions ****************************/
/************************** Function Prototypes *****************************/
extern void Xil_ExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler Handler,
void *Data);
extern void Xil_ExceptionRemoveHandler(u32 Exception_id);
extern void Xil_GetExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler *Handler, void **Data);
extern void Xil_ExceptionInit(void);
#if defined (__aarch64__)
void Xil_SyncAbortHandler(void *CallBackRef);
void Xil_SErrorAbortHandler(void *CallBackRef);
#else
extern void Xil_DataAbortHandler(void *CallBackRef);
extern void Xil_PrefetchAbortHandler(void *CallBackRef);
extern void Xil_UndefinedExceptionHandler(void *CallBackRef);
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XIL_EXCEPTION_H */
/**
* @} End of "addtogroup arm_exception_apis".
*/
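Taken together, the prototypes above support the usual start-up sequence: initialize the vector table, register a handler for the interrupt exception, then unmask it. A minimal sketch follows; MyIrqDispatch() is a hypothetical stand-in for the interrupt controller dispatch routine supplied by the BSP or GIC driver.

#include <stddef.h>
#include "xil_exception.h"

/* Hypothetical dispatcher that queries the GIC and calls the device ISR. */
static void MyIrqDispatch(void *Data)
{
    (void)Data;
    /* ... acknowledge the interrupt controller and fan out to device ISRs ... */
}

void exceptions_setup(void)
{
    /* Install default handlers for every exception id. */
    Xil_ExceptionInit();

    /* XIL_EXCEPTION_ID_INT resolves to IRQ, or to FIQ on Versal at EL3. */
    Xil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT, MyIrqDispatch, NULL);

    /* Clear the corresponding mask bit so the exception can be taken. */
    Xil_ExceptionEnable();
}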


@@ -1,60 +0,0 @@
/******************************************************************************
* Copyright (c) 2009 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm.h
*
* @addtogroup a9_specific Cortex A9 Processor Specific Include Files
*
* The xpseudo_asm.h includes xreg_cortexa9.h and xpseudo_asm_gcc.h.
*
* The xreg_cortexa9.h file contains definitions for inline assembler code.
* It provides inline definitions for Cortex A9 GPRs, SPRs, MPE registers,
* co-processor registers and Debug registers.
*
* The xpseudo_asm_gcc.h contains the definitions for the most often used inline
* assembler instructions, available as macros. These can be very useful for
* tasks such as setting or getting special purpose registers, synchronization,
* or cache manipulation etc. These inline assembler instructions can be used
* from drivers and user applications written in C.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 1.00a ecm 10/18/09 First release
* 3.04a sdm 01/02/12 Remove redundant dsb in mcr instruction.
* 6.8 aru 09/06/18 Removed compilation warnings for ARMCC toolchain.
* </pre>
*
******************************************************************************/
#ifndef XPSEUDO_ASM_H
#define XPSEUDO_ASM_H
#ifdef __cplusplus
extern "C" {
#endif
#include "xreg_cortexa9.h"
#ifdef __GNUC__
#include "xpseudo_asm_gcc.h"
#elif defined (__ICCARM__)
#include "xpseudo_asm_iccarm.h"
#else
#include "xpseudo_asm_rvct.h"
#endif
#ifdef __cplusplus
}
#endif
#endif /* XPSEUDO_ASM_H */
/**
* @} End of "addtogroup a9_specific".
*/

File diff suppressed because it is too large


@@ -1,95 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.h
*
* @addtogroup r5_cache_apis Cortex R5 Processor Cache Functions
*
* Cache functions provide access to cache related operations such as flush
* and invalidate for instruction and data caches. They give the option to perform
* the cache operations on a single cacheline, a range of memory and an entire
* cache.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.00 pkp 02/20/14 First release
* 6.2 mus 01/27/17 Updated to support IAR compiler
* </pre>
*
******************************************************************************/
#ifndef XIL_CACHE_H
#define XIL_CACHE_H
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
*@cond nocomments
*/
#if defined (__GNUC__)
#define asm_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param))
#define asm_clean_inval_dc_line_sw(param) __asm__ __volatile__("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param))
#define asm_clean_inval_dc_line_mva_poc(param) __asm__ __volatile__("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param))
#define asm_inval_ic_line_mva_pou(param) __asm__ __volatile__("mcr " \
XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param))
#elif defined (__ICCARM__)
#define asm_inval_dc_line_mva_poc(param) __asm volatile("mcr " \
XREG_CP15_INVAL_DC_LINE_MVA_POC :: "r" (param))
#define asm_clean_inval_dc_line_sw(param) __asm volatile("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_SW :: "r" (param))
#define asm_clean_inval_dc_line_mva_poc(param) __asm volatile("mcr " \
XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC :: "r" (param))
#define asm_inval_ic_line_mva_pou(param) __asm volatile("mcr " \
XREG_CP15_INVAL_IC_LINE_MVA_POU :: "r" (param))
#endif
/**
*@endcond
*/
void Xil_DCacheEnable(void);
void Xil_DCacheDisable(void);
void Xil_DCacheInvalidate(void);
void Xil_DCacheInvalidateRange(INTPTR adr, u32 len);
void Xil_DCacheFlush(void);
void Xil_DCacheFlushRange(INTPTR adr, u32 len);
void Xil_DCacheInvalidateLine(INTPTR adr);
void Xil_DCacheFlushLine(INTPTR adr);
void Xil_DCacheStoreLine(INTPTR adr);
void Xil_ICacheEnable(void);
void Xil_ICacheDisable(void);
void Xil_ICacheInvalidate(void);
void Xil_ICacheInvalidateRange(INTPTR adr, u32 len);
void Xil_ICacheInvalidateLine(INTPTR adr);
#ifdef __cplusplus
}
#endif
#endif
/**
* @} End of "addtogroup r5_cache_apis".
*/
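One case where the range variants above matter is copied or self-modifying code: after writing instructions through the data cache, the data side must be flushed and the instruction side invalidated before jumping to the new code. A minimal sketch under that assumption; load_overlay() and the destination address are hypothetical.

#include "xil_cache.h"
#include "xil_types.h"

/* Hypothetical loader that copies a code overlay to RAM and returns its size. */
extern u32 load_overlay(void *dest);

void run_overlay(void)
{
    void *dest = (void *)0x20000U;   /* example destination, arbitrary */
    u32 len = load_overlay(dest);

    /* Write the new instructions back to memory... */
    Xil_DCacheFlushRange((INTPTR)dest, len);
    /* ...and make sure the instruction cache refetches them. */
    Xil_ICacheInvalidateRange((INTPTR)dest, len);

    ((void (*)(void))dest)();        /* jump to the copied code */
}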


@@ -1,408 +0,0 @@
/******************************************************************************
* Copyright (c) 2015 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_exception.h
*
* This header file contains ARM Cortex A53, A9, and R5 specific exception related APIs.
* For exception related functions that can be used across all Xilinx supported
* processors, please use xil_exception.h.
*
* @addtogroup arm_exception_apis ARM Processor Exception Handling
* @{
* ARM processor-specific exception related APIs for Cortex A53, A9 and R5 can be
* utilized for enabling/disabling IRQ, registering/removing handlers for
* exceptions, or initializing the exception vector table with a null handler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.2 pkp 28/05/15 First release
* 6.0 mus 27/07/16 Consolidated file for a53,a9 and r5 processors
* 6.7 mna 26/04/18 Add API Xil_GetExceptionRegisterHandler.
* 6.7 asa 18/05/18 Update signature of API Xil_GetExceptionRegisterHandler.
* 7.0 mus 01/03/19 Tweak Xil_ExceptionEnableMask and
* Xil_ExceptionDisableMask macros to support legacy
* examples for Cortexa72 EL3 exception level.
* 7.3 mus 04/15/20 Added Xil_EnableNestedInterrupts and
* Xil_DisableNestedInterrupts macros for ARMv8.
*                       For Cortexa72, these macros would not be supported
*                       at EL3, as Cortexa72 is using GIC-500(GICv3), which
*                       triggers only FIQ at EL3. Fix for CR#1062506
* 7.6 mus 09/17/21 Updated flag checking to fix warning reported with
* -Wundef compiler option CR#1110261
* 7.7  mus      01/31/22 A few of the #defines in xil_exception.h are treated
*                       in a different way based on the "versal" flag. In the
*                       existing flow, this flag is defined only in xparameters.h
*                       and the BSP compiler flags; it is not defined in the
*                       application compiler flags. So, including xil_exception.h
*                       in an application source file without including
*                       xparameters.h results in incorrect behavior.
*                       xparameters.h is now included in xil_exception.h to avoid
*                       such issues. It fixes CR#1120498.
* 7.7 sk 03/02/22 Define XExc_VectorTableEntry structure to fix
* misra_c_2012_rule_5_6 violation.
* 7.7 sk 03/02/22 Add XExc_VectorTable as extern to fix misra_c_2012_
* rule_8_4 violation.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
#define XIL_EXCEPTION_H /* by using protection macros */
/***************************** Include Files ********************************/
#include "xil_types.h"
#include "xpseudo_asm.h"
#include "bspconfig.h"
#include "xparameters.h"
#ifdef __cplusplus
extern "C" {
#endif
/************************** Constant Definitions ****************************/
#define XIL_EXCEPTION_FIQ XREG_CPSR_FIQ_ENABLE
#define XIL_EXCEPTION_IRQ XREG_CPSR_IRQ_ENABLE
#define XIL_EXCEPTION_ALL (XREG_CPSR_FIQ_ENABLE | XREG_CPSR_IRQ_ENABLE)
#define XIL_EXCEPTION_ID_FIRST 0U
#if defined (__aarch64__)
#define XIL_EXCEPTION_ID_SYNC_INT 1U
#define XIL_EXCEPTION_ID_IRQ_INT 2U
#define XIL_EXCEPTION_ID_FIQ_INT 3U
#define XIL_EXCEPTION_ID_SERROR_ABORT_INT 4U
#define XIL_EXCEPTION_ID_LAST 5U
#else
#define XIL_EXCEPTION_ID_RESET 0U
#define XIL_EXCEPTION_ID_UNDEFINED_INT 1U
#define XIL_EXCEPTION_ID_SWI_INT 2U
#define XIL_EXCEPTION_ID_PREFETCH_ABORT_INT 3U
#define XIL_EXCEPTION_ID_DATA_ABORT_INT 4U
#define XIL_EXCEPTION_ID_IRQ_INT 5U
#define XIL_EXCEPTION_ID_FIQ_INT 6U
#define XIL_EXCEPTION_ID_LAST 6U
#endif
/*
* XIL_EXCEPTION_ID_INT is defined for all Xilinx processors.
*/
#if defined (versal) && !defined(ARMR5) && EL3
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_FIQ_INT
#else
#define XIL_EXCEPTION_ID_INT XIL_EXCEPTION_ID_IRQ_INT
#endif
/**************************** Type Definitions ******************************/
/**
* This typedef is the exception handler function.
*/
typedef void (*Xil_ExceptionHandler)(void *data);
typedef void (*Xil_InterruptHandler)(void *data);
typedef struct {
Xil_ExceptionHandler Handler;
void *Data;
} XExc_VectorTableEntry;
extern XExc_VectorTableEntry XExc_VectorTable[];
/**
*@endcond
*/
/***************** Macros (Inline Functions) Definitions ********************/
/****************************************************************************/
/**
* @brief Enable Exceptions.
*
* @param Mask: Value for enabling the exceptions.
*
* @return None.
*
* @note If bit is 0, exception is enabled.
* C-Style signature: void Xil_ExceptionEnableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, this macro is tweaked to always enable FIQ,
* ignoring the argument passed by the user.
*/
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionEnableMask(Mask) \
mtcpsr(mfcpsr() & ~ ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionEnableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) & (~((Mask) & XIL_EXCEPTION_ALL))); \
}
#endif
/****************************************************************************/
/**
* @brief Enable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_FIQ)
#else
#define Xil_ExceptionEnable() \
Xil_ExceptionEnableMask(XIL_EXCEPTION_IRQ)
#endif
/****************************************************************************/
/**
* @brief Disable Exceptions.
*
* @param Mask: Value for disabling the exceptions.
*
* @return None.
*
* @note If bit is 1, exception is disabled.
* C-Style signature: Xil_ExceptionDisableMask(Mask)
*
******************************************************************************/
#if defined (versal) && !defined(ARMR5) && EL3
/*
* The Cortex-A72 processor in Versal is coupled with GIC-500, and GIC-500 supports
* only FIQ at EL3. Hence, this macro is tweaked to always disable FIQ,
* ignoring the argument passed by the user.
*/
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((XIL_EXCEPTION_FIQ) & XIL_EXCEPTION_ALL))
#elif defined (__GNUC__) || defined (__ICCARM__)
#define Xil_ExceptionDisableMask(Mask) \
mtcpsr(mfcpsr() | ((Mask) & XIL_EXCEPTION_ALL))
#else
#define Xil_ExceptionDisableMask(Mask) \
{ \
register u32 Reg __asm("cpsr"); \
mtcpsr((Reg) | ((Mask) & XIL_EXCEPTION_ALL)); \
}
#endif
/****************************************************************************/
/**
* Disable the IRQ exception.
*
* @return None.
*
* @note None.
*
******************************************************************************/
#define Xil_ExceptionDisable() \
Xil_ExceptionDisableMask(XIL_EXCEPTION_IRQ)
#if ( defined (PLATFORM_ZYNQMP) && defined (EL3) && (EL3==1) )
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF. This
*        macro is defined for the Cortex-A53 64 bit mode BSP configured to run
*        at EL3. However, it is not defined for the Versal Cortex-A72 BSP
*        configured to run at EL3, because the Cortex-A72 is coupled
*        with GIC-500 (GICv3 specifications) and triggers only FIQ at EL3.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL3"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL3"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
* macro is defined for Cortex-A53 64 bit mode BSP configured to run
* at EL3.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL3, X1"); \
__asm__ __volatile__ ("msr SPSR_EL3, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (defined (EL1_NONSECURE) && (EL1_NONSECURE==1))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I bit in DAIF. This
*        macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
*        BSPs configured to run at EL1 NON SECURE.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I bit
* is set as 1). To allow nesting of interrupts, this macro should be
* used. It clears the I bit. Once that bit is cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I
* bit, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("mrs X1, ELR_EL1"); \
__asm__ __volatile__ ("mrs X2, SPSR_EL1"); \
__asm__ __volatile__ ("stp X1,X2, [sp,#-0x10]!"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("bic X1,X1,#(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I bit in DAIF. This
*        macro is defined for Cortex-A53 64 bit mode and Cortex-A72 64 bit
*        BSPs configured to run at EL1 NON SECURE.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ mode and
* hence sets back the I bit.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldp X1,X2, [sp,#0x10]!"); \
__asm__ __volatile__ ("msr ELR_EL1, X1"); \
__asm__ __volatile__ ("msr SPSR_EL1, X2"); \
__asm__ __volatile__ ("mrs X1, DAIF"); \
__asm__ __volatile__ ("orr X1, X1, #(0x1<<7)"); \
__asm__ __volatile__ ("msr DAIF, X1"); \
#elif (!defined (__aarch64__) && !defined (ARMA53_32))
/****************************************************************************/
/**
* @brief Enable nested interrupts by clearing the I and F bits in CPSR. This
* API is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is supposed to be used from interrupt handlers. In the
* interrupt handler the interrupts are disabled by default (I and F
* are 1). To allow nesting of interrupts, this macro should be
* used. It clears the I and F bits by changing the ARM mode to
* system mode. Once these bits are cleared and provided the
*            interrupt preemption conditions are met in the GIC, nesting of
* interrupts will start happening.
* Caution: This macro must be used with caution. Before calling this
* macro, the user must ensure that the source of the current IRQ
* is appropriately cleared. Otherwise, as soon as we clear the I and
* F bits, there can be an infinite loop of interrupts with an
* eventual crash (all the stack space getting consumed).
******************************************************************************/
#define Xil_EnableNestedInterrupts() \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("mrs lr, spsr"); \
__asm__ __volatile__ ("stmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x1F"); \
__asm__ __volatile__ ("stmfd sp!, {lr}");
/****************************************************************************/
/**
* @brief Disable the nested interrupts by setting the I and F bits. This API
* is defined for cortex-a9 and cortex-r5.
*
* @return None.
*
* @note This macro is meant to be called in the interrupt service routines.
* This macro cannot be used independently. It can only be used when
*            nesting of interrupts has been enabled by using the macro
* Xil_EnableNestedInterrupts(). In a typical flow, the user first
* calls the Xil_EnableNestedInterrupts in the ISR at the appropriate
* point. The user then must call this macro before exiting the interrupt
* service routine. This macro puts the ARM back in IRQ/FIQ mode and
* hence sets back the I and F bits.
******************************************************************************/
#define Xil_DisableNestedInterrupts() \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr cpsr_c, #0x92"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
__asm__ __volatile__ ("msr spsr_cxsf, lr"); \
__asm__ __volatile__ ("ldmfd sp!, {lr}"); \
#endif
/************************** Variable Definitions ****************************/
/************************** Function Prototypes *****************************/
extern void Xil_ExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler Handler,
void *Data);
extern void Xil_ExceptionRemoveHandler(u32 Exception_id);
extern void Xil_GetExceptionRegisterHandler(u32 Exception_id,
Xil_ExceptionHandler *Handler, void **Data);
extern void Xil_ExceptionInit(void);
#if defined (__aarch64__)
void Xil_SyncAbortHandler(void *CallBackRef);
void Xil_SErrorAbortHandler(void *CallBackRef);
#else
extern void Xil_DataAbortHandler(void *CallBackRef);
extern void Xil_PrefetchAbortHandler(void *CallBackRef);
extern void Xil_UndefinedExceptionHandler(void *CallBackRef);
#endif
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XIL_EXCEPTION_H */
/**
* @} End of "addtogroup arm_exception_apis".
*/
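The mask macros above can also bracket a short critical section; note from their definitions that Xil_ExceptionEnableMask() unconditionally clears the requested mask bits rather than restoring the previous state, so this pattern only suits code that knows interrupts were enabled beforehand. A minimal sketch:

#include "xil_exception.h"

static volatile u32 SharedCounter;

void shared_counter_increment(void)
{
    /* Mask IRQ and FIQ around the read-modify-write of the shared counter. */
    Xil_ExceptionDisableMask(XIL_EXCEPTION_ALL);
    SharedCounter++;
    /* Re-enables both unconditionally; it does not restore the prior mask. */
    Xil_ExceptionEnableMask(XIL_EXCEPTION_ALL);
}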


@@ -1,117 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
* @file xil_mmu.h
*
* @addtogroup r5_mpu_apis Cortex R5 Processor MPU specific APIs
*
* MPU functions provide access to MPU operations such as enabling the MPU,
* disabling the MPU, and setting attributes for a section of memory.
* Boot code invokes Init_MPU function to configure the MPU. A total of 10 MPU
* regions are allocated with another 6 being free for users. Overview of the
* memory attributes for different MPU regions is as given below,
*
*| | Memory Range | Attributes of MPURegion |
*|-----------------------|-------------------------|-----------------------------|
*| DDR | 0x00000000 - 0x7FFFFFFF | Normal write-back Cacheable |
*| PL | 0x80000000 - 0xBFFFFFFF | Strongly Ordered |
*| QSPI | 0xC0000000 - 0xDFFFFFFF | Device Memory |
*| PCIe | 0xE0000000 - 0xEFFFFFFF | Device Memory |
*| STM_CORESIGHT | 0xF8000000 - 0xF8FFFFFF | Device Memory |
*| RPU_R5_GIC | 0xF9000000 - 0xF90FFFFF | Device memory |
*| FPS | 0xFD000000 - 0xFDFFFFFF | Device Memory |
*| LPS | 0xFE000000 - 0xFFFFFFFF | Device Memory |
*| OCM | 0xFFFC0000 - 0xFFFFFFFF | Normal write-back Cacheable |
*
*
* @note
* For a system where DDR is less than 2GB, the region after DDR and before PL is
* marked as undefined in the translation table. The memory range 0xFE000000-0xFEFFFFFF is
* allocated for upper LPS slaves, whereas the memory region 0xFF000000-0xFFFFFFFF is
* allocated for lower LPS slaves.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- ---------------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 6.4 asa 08/16/17 Added many APIs for MPU access to make MPU usage
* user-friendly. The APIs added are: Xil_UpdateMPUConfig,
* Xil_GetMPUConfig, Xil_GetNumOfFreeRegions,
* Xil_GetNextMPURegion, Xil_DisableMPURegionByRegNum,
* Xil_GetMPUFreeRegMask, Xil_SetMPURegionByRegNum, and
* Xil_InitializeExistingMPURegConfig.
* Added a new array of structure of type XMpuConfig to
* represent the MPU configuration table.
* 7.7 sk 01/10/22 Modify Xil_SetTlbAttributes function argument name to fix
* misra_c_2012_rule_8_3 violation.
* </pre>
*
*
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_MPU_H
#define XIL_MPU_H
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
#include "xil_types.h"
/***************************** Include Files *********************************/
/***************** Macros (Inline Functions) Definitions *********************/
#define MPU_REG_DISABLED 0U
#define MPU_REG_ENABLED 1U
#define MAX_POSSIBLE_MPU_REGS 16U
/**************************** Type Definitions *******************************/
struct XMpuConfig{
u32 RegionStatus; /* Enabled or disabled */
INTPTR BaseAddress;/* MPU region base address */
	u64 Size; /* MPU region size */
	u32 Attribute; /* MPU region attributes */
};
typedef struct XMpuConfig XMpu_Config[MAX_POSSIBLE_MPU_REGS];
extern XMpu_Config Mpu_Config;
/************************** Constant Definitions *****************************/
/************************** Variable Definitions *****************************/
/************************** Function Prototypes ******************************/
/**
*@endcond
*/
void Xil_SetTlbAttributes(INTPTR addr, u32 attrib);
void Xil_EnableMPU(void);
void Xil_DisableMPU(void);
u32 Xil_SetMPURegion(INTPTR addr, u64 size, u32 attrib);
u32 Xil_UpdateMPUConfig(u32 reg_num, INTPTR address, u32 size, u32 attrib);
void Xil_GetMPUConfig (XMpu_Config mpuconfig);
u32 Xil_GetNumOfFreeRegions (void);
u32 Xil_GetNextMPURegion(void);
u32 Xil_DisableMPURegionByRegNum (u32 reg_num);
u16 Xil_GetMPUFreeRegMask (void);
u32 Xil_SetMPURegionByRegNum (u32 reg_num, INTPTR addr, u64 size, u32 attrib);
void* Xil_MemMap(UINTPTR Physaddr, size_t size, u32 flags);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XIL_MPU_H */
/**
* @} End of "addtogroup r5_mpu_apis".
*/
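A typical use of the APIs above is to carve out one of the free regions for a peripheral window or a non-cacheable buffer. The sketch below assumes Xil_SetMPURegion() takes the size in bytes, rounds it to a supported region size, and claims one of the free MPU regions, and that the attribute macros (DEVICE_NONSHARED, PRIV_RW_USER_NA) come from xreg_cortexr5.h; the base address is an arbitrary example.

#include "xil_mpu.h"
#include "xreg_cortexr5.h"
#include "xil_types.h"

void map_device_window(void)
{
    /* 1 MB window of non-shareable Device memory, privileged access only. */
    u32 Status = Xil_SetMPURegion((INTPTR)0xB0000000U, 0x100000U,
                                  DEVICE_NONSHARED | PRIV_RW_USER_NA);

    if (Status != 0U) {
        /* No free MPU region was available or the parameters were rejected. */
        return;
    }
}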


@@ -1,60 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm.h
*
* @addtogroup r5_specific Cortex R5 Processor Specific Include Files
*
* The xpseudo_asm.h includes xreg_cortexr5.h and xpseudo_asm_gcc.h.
*
* The xreg_cortexr5.h file contains definitions for inline assembler code.
* It provides inline definitions for Cortex R5 GPRs, SPRs,co-processor
* registers and Debug register
*
* The xpseudo_asm_gcc.h contains the definitions for the most often used
* inline assembler instructions, available as macros. These can be very
* useful for tasks such as setting or getting special purpose registers,
* synchronization,or cache manipulation. These inline assembler instructions
* synchronization, or cache manipulation. These inline assembler instructions
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -----------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 6.2 mus 01/27/17 Updated to support IAR compiler
* 7.3 dp 06/25/20 Initial version for armclang
* </pre>
*
******************************************************************************/
#ifndef XPSEUDO_ASM_H /* prevent circular inclusions */
#define XPSEUDO_ASM_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
#include "xreg_cortexr5.h"
#if defined (__clang__)
#include "xpseudo_asm_armclang.h"
#elif defined (__GNUC__)
#include "xpseudo_asm_gcc.h"
#elif defined (__ICCARM__)
#include "xpseudo_asm_iccarm.h"
#endif
#ifdef __cplusplus
}
#endif
#endif /* XPSEUDO_ASM_H */
/**
* @} End of "addtogroup r5_specific".
*/


@@ -1,429 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2022 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xreg_cortexr5.h
*
* This header file contains definitions for using inline assembler code. It is
* written specifically for the GNU, IAR, and ARMCC compilers.
*
* All of the ARM Cortex R5 GPRs, SPRs, and Debug Registers are defined along
* with the positions of the bits within the registers.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.00 pkp 02/10/14 Initial version
* 7.7 sk 01/10/22 Update PRIV_RW_USER_RW macro from unsigned to unsigned
* long to fix misra_c_2012_rule_12_2 violation.
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XREG_CORTEXR5_H /* prevent circular inclusions */
#define XREG_CORTEXR5_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/* GPRs */
#define XREG_GPR0 r0
#define XREG_GPR1 r1
#define XREG_GPR2 r2
#define XREG_GPR3 r3
#define XREG_GPR4 r4
#define XREG_GPR5 r5
#define XREG_GPR6 r6
#define XREG_GPR7 r7
#define XREG_GPR8 r8
#define XREG_GPR9 r9
#define XREG_GPR10 r10
#define XREG_GPR11 r11
#define XREG_GPR12 r12
#define XREG_GPR13 r13
#define XREG_GPR14 r14
#define XREG_GPR15 r15
#define XREG_CPSR cpsr
/* Coprocessor number defines */
#define XREG_CP0 0
#define XREG_CP1 1
#define XREG_CP2 2
#define XREG_CP3 3
#define XREG_CP4 4
#define XREG_CP5 5
#define XREG_CP6 6
#define XREG_CP7 7
#define XREG_CP8 8
#define XREG_CP9 9
#define XREG_CP10 10
#define XREG_CP11 11
#define XREG_CP12 12
#define XREG_CP13 13
#define XREG_CP14 14
#define XREG_CP15 15
/* Coprocessor control register defines */
#define XREG_CR0 cr0
#define XREG_CR1 cr1
#define XREG_CR2 cr2
#define XREG_CR3 cr3
#define XREG_CR4 cr4
#define XREG_CR5 cr5
#define XREG_CR6 cr6
#define XREG_CR7 cr7
#define XREG_CR8 cr8
#define XREG_CR9 cr9
#define XREG_CR10 cr10
#define XREG_CR11 cr11
#define XREG_CR12 cr12
#define XREG_CR13 cr13
#define XREG_CR14 cr14
#define XREG_CR15 cr15
/* Current Processor Status Register (CPSR) Bits */
#define XREG_CPSR_THUMB_MODE 0x20U
#define XREG_CPSR_MODE_BITS 0x1FU
#define XREG_CPSR_SYSTEM_MODE 0x1FU
#define XREG_CPSR_UNDEFINED_MODE 0x1BU
#define XREG_CPSR_DATA_ABORT_MODE 0x17U
#define XREG_CPSR_SVC_MODE 0x13U
#define XREG_CPSR_IRQ_MODE 0x12U
#define XREG_CPSR_FIQ_MODE 0x11U
#define XREG_CPSR_USER_MODE 0x10U
#define XREG_CPSR_IRQ_ENABLE 0x80U
#define XREG_CPSR_FIQ_ENABLE 0x40U
#define XREG_CPSR_N_BIT 0x80000000U
#define XREG_CPSR_Z_BIT 0x40000000U
#define XREG_CPSR_C_BIT 0x20000000U
#define XREG_CPSR_V_BIT 0x10000000U
/*MPU region definitions*/
#define REGION_32B 0x00000004U
#define REGION_64B 0x00000005U
#define REGION_128B 0x00000006U
#define REGION_256B 0x00000007U
#define REGION_512B 0x00000008U
#define REGION_1K 0x00000009U
#define REGION_2K 0x0000000AU
#define REGION_4K 0x0000000BU
#define REGION_8K 0x0000000CU
#define REGION_16K 0x0000000DU
#define REGION_32K 0x0000000EU
#define REGION_64K 0x0000000FU
#define REGION_128K 0x00000010U
#define REGION_256K 0x00000011U
#define REGION_512K 0x00000012U
#define REGION_1M 0x00000013U
#define REGION_2M 0x00000014U
#define REGION_4M 0x00000015U
#define REGION_8M 0x00000016U
#define REGION_16M 0x00000017U
#define REGION_32M 0x00000018U
#define REGION_64M 0x00000019U
#define REGION_128M 0x0000001AU
#define REGION_256M 0x0000001BU
#define REGION_512M 0x0000001CU
#define REGION_1G 0x0000001DU
#define REGION_2G 0x0000001EU
#define REGION_4G 0x0000001FU
#define REGION_EN 0x00000001U
#define SHAREABLE 0x00000004U /*shareable */
#define STRONG_ORDERD_SHARED 0x00000000U /*strongly ordered, always shareable*/
#define DEVICE_SHARED 0x00000001U /*device, shareable*/
#define DEVICE_NONSHARED 0x00000010U /*device, non shareable*/
#define NORM_NSHARED_WT_NWA 0x00000002U /*Outer and Inner write-through, no write-allocate non-shareable*/
#define NORM_SHARED_WT_NWA 0x00000006U /*Outer and Inner write-through, no write-allocate shareable*/
#define NORM_NSHARED_WB_NWA 0x00000003U /*Outer and Inner write-back, no write-allocate non shareable*/
#define NORM_SHARED_WB_NWA 0x00000007U /*Outer and Inner write-back, no write-allocate shareable*/
#define NORM_NSHARED_NCACHE 0x00000008U /*Outer and Inner Non cacheable non shareable*/
#define NORM_SHARED_NCACHE 0x0000000CU /*Outer and Inner Non cacheable shareable*/
#define NORM_NSHARED_WB_WA 0x0000000BU /*Outer and Inner write-back non shared*/
#define NORM_SHARED_WB_WA 0x0000000FU /*Outer and Inner write-back shared*/
/* inner and outer cache policies can be combined for different combinations */
#define NORM_IN_POLICY_NCACHE 0x00000020U /*inner non cacheable*/
#define NORM_IN_POLICY_WB_WA 0x00000021U /*inner write back write allocate*/
#define NORM_IN_POLICY_WT_NWA 0x00000022U /*inner write through no write allocate*/
#define NORM_IN_POLICY_WB_NWA 0x00000023U /*inner write back no write allocate*/
#define NORM_OUT_POLICY_NCACHE 0x00000020U /*outer non cacheable*/
#define NORM_OUT_POLICY_WB_WA 0x00000028U /*outer write back write allocate*/
#define NORM_OUT_POLICY_WT_NWA 0x00000030U /*outer write through no write allocate*/
#define NORM_OUT_POLICY_WB_NWA 0x00000038U /*outer write back no write allocate*/
#define NO_ACCESS (0x00000000U<<8U) /*No access*/
#define PRIV_RW_USER_NA (0x00000001U<<8U) /*Privileged access only*/
#define PRIV_RW_USER_RO (0x00000002U<<8U) /*Writes in User mode generate permission faults*/
#define PRIV_RW_USER_RW (0x00000003UL<<8U) /*Full Access*/
#define PRIV_RO_USER_NA (0x00000005U<<8U) /*Privileged read only*/
#define PRIV_RO_USER_RO (0x00000006U<<8U) /*Privileged/User read-only*/
#define EXECUTE_NEVER (0x00000001U<<12U) /* Bit 12*/
/* CP15 defines */
/* C0 Register defines */
#define XREG_CP15_MAIN_ID "p15, 0, %0, c0, c0, 0"
#define XREG_CP15_CACHE_TYPE "p15, 0, %0, c0, c0, 1"
#define XREG_CP15_TCM_TYPE "p15, 0, %0, c0, c0, 2"
#define XREG_CP15_TLB_TYPE "p15, 0, %0, c0, c0, 3"
#define XREG_CP15_MPU_TYPE "p15, 0, %0, c0, c0, 4"
#define XREG_CP15_MULTI_PROC_AFFINITY "p15, 0, %0, c0, c0, 5"
#define XREG_CP15_PROC_FEATURE_0 "p15, 0, %0, c0, c1, 0"
#define XREG_CP15_PROC_FEATURE_1 "p15, 0, %0, c0, c1, 1"
#define XREG_CP15_DEBUG_FEATURE_0 "p15, 0, %0, c0, c1, 2"
#define XREG_CP15_MEMORY_FEATURE_0 "p15, 0, %0, c0, c1, 4"
#define XREG_CP15_MEMORY_FEATURE_1 "p15, 0, %0, c0, c1, 5"
#define XREG_CP15_MEMORY_FEATURE_2 "p15, 0, %0, c0, c1, 6"
#define XREG_CP15_MEMORY_FEATURE_3 "p15, 0, %0, c0, c1, 7"
#define XREG_CP15_INST_FEATURE_0 "p15, 0, %0, c0, c2, 0"
#define XREG_CP15_INST_FEATURE_1 "p15, 0, %0, c0, c2, 1"
#define XREG_CP15_INST_FEATURE_2 "p15, 0, %0, c0, c2, 2"
#define XREG_CP15_INST_FEATURE_3 "p15, 0, %0, c0, c2, 3"
#define XREG_CP15_INST_FEATURE_4 "p15, 0, %0, c0, c2, 4"
#define XREG_CP15_INST_FEATURE_5 "p15, 0, %0, c0, c2, 5"
#define XREG_CP15_CACHE_SIZE_ID "p15, 1, %0, c0, c0, 0"
#define XREG_CP15_CACHE_LEVEL_ID "p15, 1, %0, c0, c0, 1"
#define XREG_CP15_AUXILARY_ID "p15, 1, %0, c0, c0, 7"
#define XREG_CP15_CACHE_SIZE_SEL "p15, 2, %0, c0, c0, 0"
/* C1 Register Defines */
#define XREG_CP15_SYS_CONTROL "p15, 0, %0, c1, c0, 0"
#define XREG_CP15_AUX_CONTROL "p15, 0, %0, c1, c0, 1"
#define XREG_CP15_CP_ACCESS_CONTROL "p15, 0, %0, c1, c0, 2"
/* XREG_CP15_CONTROL bit defines */
#define XREG_CP15_CONTROL_TE_BIT 0x40000000U
#define XREG_CP15_CONTROL_AFE_BIT 0x20000000U
#define XREG_CP15_CONTROL_TRE_BIT 0x10000000U
#define XREG_CP15_CONTROL_NMFI_BIT 0x08000000U
#define XREG_CP15_CONTROL_EE_BIT 0x02000000U
#define XREG_CP15_CONTROL_HA_BIT 0x00020000U
#define XREG_CP15_CONTROL_RR_BIT 0x00004000U
#define XREG_CP15_CONTROL_V_BIT 0x00002000U
#define XREG_CP15_CONTROL_I_BIT 0x00001000U
#define XREG_CP15_CONTROL_Z_BIT 0x00000800U
#define XREG_CP15_CONTROL_SW_BIT 0x00000400U
#define XREG_CP15_CONTROL_B_BIT 0x00000080U
#define XREG_CP15_CONTROL_C_BIT 0x00000004U
#define XREG_CP15_CONTROL_A_BIT 0x00000002U
#define XREG_CP15_CONTROL_M_BIT 0x00000001U
/* C2 Register Defines */
/* Not Used */
/* C3 Register Defines */
/* Not Used */
/* C4 Register Defines */
/* Not Used */
/* C5 Register Defines */
#define XREG_CP15_DATA_FAULT_STATUS "p15, 0, %0, c5, c0, 0"
#define XREG_CP15_INST_FAULT_STATUS "p15, 0, %0, c5, c0, 1"
#define XREG_CP15_AUX_DATA_FAULT_STATUS "p15, 0, %0, c5, c1, 0"
#define XREG_CP15_AUX_INST_FAULT_STATUS "p15, 0, %0, c5, c1, 1"
/* C6 Register Defines */
#define XREG_CP15_DATA_FAULT_ADDRESS "p15, 0, %0, c6, c0, 0"
#define XREG_CP15_INST_FAULT_ADDRESS "p15, 0, %0, c6, c0, 2"
#define XREG_CP15_MPU_REG_BASEADDR "p15, 0, %0, c6, c1, 0"
#define XREG_CP15_MPU_REG_SIZE_EN "p15, 0, %0, c6, c1, 2"
#define XREG_CP15_MPU_REG_ACCESS_CTRL "p15, 0, %0, c6, c1, 4"
#define XREG_CP15_MPU_MEMORY_REG_NUMBER "p15, 0, %0, c6, c2, 0"
/* C7 Register Defines */
#define XREG_CP15_NOP "p15, 0, %0, c7, c0, 4"
#define XREG_CP15_INVAL_IC_POU "p15, 0, %0, c7, c5, 0"
#define XREG_CP15_INVAL_IC_LINE_MVA_POU "p15, 0, %0, c7, c5, 1"
/* The CP15 register access below has been deprecated in favor of the new
* isb instruction in Cortex R5.
*/
#define XREG_CP15_INST_SYNC_BARRIER "p15, 0, %0, c7, c5, 4"
#define XREG_CP15_INVAL_BRANCH_ARRAY "p15, 0, %0, c7, c5, 6"
#define XREG_CP15_INVAL_BRANCH_ARRAY_LINE "p15, 0, %0, c7, c5, 7"
#define XREG_CP15_INVAL_DC_LINE_MVA_POC "p15, 0, %0, c7, c6, 1"
#define XREG_CP15_INVAL_DC_LINE_SW "p15, 0, %0, c7, c6, 2"
#define XREG_CP15_CLEAN_DC_LINE_MVA_POC "p15, 0, %0, c7, c10, 1"
#define XREG_CP15_CLEAN_DC_LINE_SW "p15, 0, %0, c7, c10, 2"
#define XREG_CP15_INVAL_DC_ALL "p15, 0, %0, c15, c5, 0"
/* The next two CP15 register accesses below have been deprecated in favor
* of the new dsb and dmb instructions in Cortex R5.
*/
#define XREG_CP15_DATA_SYNC_BARRIER "p15, 0, %0, c7, c10, 4"
#define XREG_CP15_DATA_MEMORY_BARRIER "p15, 0, %0, c7, c10, 5"
#define XREG_CP15_CLEAN_DC_LINE_MVA_POU "p15, 0, %0, c7, c11, 1"
#define XREG_CP15_NOP2 "p15, 0, %0, c7, c13, 1"
#define XREG_CP15_CLEAN_INVAL_DC_LINE_MVA_POC "p15, 0, %0, c7, c14, 1"
#define XREG_CP15_CLEAN_INVAL_DC_LINE_SW "p15, 0, %0, c7, c14, 2"
/* C8 Register Defines */
/* Not Used */
/* C9 Register Defines */
#define XREG_CP15_ATCM_REG_SIZE_ADDR "p15, 0, %0, c9, c1, 1"
#define XREG_CP15_BTCM_REG_SIZE_ADDR "p15, 0, %0, c9, c1, 0"
#define XREG_CP15_TCM_SELECTION "p15, 0, %0, c9, c2, 0"
#define XREG_CP15_PERF_MONITOR_CTRL "p15, 0, %0, c9, c12, 0"
#define XREG_CP15_COUNT_ENABLE_SET "p15, 0, %0, c9, c12, 1"
#define XREG_CP15_COUNT_ENABLE_CLR "p15, 0, %0, c9, c12, 2"
#define XREG_CP15_V_FLAG_STATUS "p15, 0, %0, c9, c12, 3"
#define XREG_CP15_SW_INC "p15, 0, %0, c9, c12, 4"
#define XREG_CP15_EVENT_CNTR_SEL "p15, 0, %0, c9, c12, 5"
#define XREG_CP15_PERF_CYCLE_COUNTER "p15, 0, %0, c9, c13, 0"
#define XREG_CP15_EVENT_TYPE_SEL "p15, 0, %0, c9, c13, 1"
#define XREG_CP15_PERF_MONITOR_COUNT "p15, 0, %0, c9, c13, 2"
#define XREG_CP15_USER_ENABLE "p15, 0, %0, c9, c14, 0"
#define XREG_CP15_INTR_ENABLE_SET "p15, 0, %0, c9, c14, 1"
#define XREG_CP15_INTR_ENABLE_CLR "p15, 0, %0, c9, c14, 2"
/* C10 Register Defines */
/* Not used */
/* C11 Register Defines */
/* Not used */
/* C12 Register Defines */
/* Not used */
/* C13 Register Defines */
#define XREG_CP15_CONTEXT_ID "p15, 0, %0, c13, c0, 1"
#define USER_RW_THREAD_PID "p15, 0, %0, c13, c0, 2"
#define USER_RO_THREAD_PID "p15, 0, %0, c13, c0, 3"
#define USER_PRIV_THREAD_PID "p15, 0, %0, c13, c0, 4"
/* C14 Register Defines */
/* not used */
/* C15 Register Defines */
#define XREG_CP15_SEC_AUX_CTRL "p15, 0, %0, c15, c0, 0"
/* MPE register definitions */
#define XREG_FPSID c0
#define XREG_FPSCR c1
#define XREG_MVFR1 c6
#define XREG_MVFR0 c7
#define XREG_FPEXC c8
#define XREG_FPINST c9
#define XREG_FPINST2 c10
/* FPSID bits */
#define XREG_FPSID_IMPLEMENTER_BIT (24U)
#define XREG_FPSID_IMPLEMENTER_MASK (0x000000FFU << XREG_FPSID_IMPLEMENTER_BIT)
#define XREG_FPSID_SOFTWARE (0X00000001U << 23U)
#define XREG_FPSID_ARCH_BIT (16U)
#define XREG_FPSID_ARCH_MASK (0x0000000FU << XREG_FPSID_ARCH_BIT)
#define XREG_FPSID_PART_BIT (8U)
#define XREG_FPSID_PART_MASK (0x000000FFU << XREG_FPSID_PART_BIT)
#define XREG_FPSID_VARIANT_BIT (4U)
#define XREG_FPSID_VARIANT_MASK (0x0000000FU << XREG_FPSID_VARIANT_BIT)
#define XREG_FPSID_REV_BIT (0U)
#define XREG_FPSID_REV_MASK (0x0000000FU << XREG_FPSID_REV_BIT)
/* FPSCR bits */
#define XREG_FPSCR_N_BIT (0X00000001U << 31U)
#define XREG_FPSCR_Z_BIT (0X00000001U << 30U)
#define XREG_FPSCR_C_BIT (0X00000001U << 29U)
#define XREG_FPSCR_V_BIT (0X00000001U << 28U)
#define XREG_FPSCR_QC (0X00000001U << 27U)
#define XREG_FPSCR_AHP (0X00000001U << 26U)
#define XREG_FPSCR_DEFAULT_NAN (0X00000001U << 25U)
#define XREG_FPSCR_FLUSHTOZERO (0X00000001U << 24U)
#define XREG_FPSCR_ROUND_NEAREST (0X00000000U << 22U)
#define XREG_FPSCR_ROUND_PLUSINF (0X00000001U << 22U)
#define XREG_FPSCR_ROUND_MINUSINF (0X00000002U << 22U)
#define XREG_FPSCR_ROUND_TOZERO (0X00000003U << 22U)
#define XREG_FPSCR_RMODE_BIT (22U)
#define XREG_FPSCR_RMODE_MASK (0X00000003U << XREG_FPSCR_RMODE_BIT)
#define XREG_FPSCR_STRIDE_BIT (20U)
#define XREG_FPSCR_STRIDE_MASK (0X00000003U << XREG_FPSCR_STRIDE_BIT)
#define XREG_FPSCR_LENGTH_BIT (16U)
#define XREG_FPSCR_LENGTH_MASK (0X00000007U << XREG_FPSCR_LENGTH_BIT)
#define XREG_FPSCR_IDC (0X00000001U << 7U)
#define XREG_FPSCR_IXC (0X00000001U << 4U)
#define XREG_FPSCR_UFC (0X00000001U << 3U)
#define XREG_FPSCR_OFC (0X00000001U << 2U)
#define XREG_FPSCR_DZC (0X00000001U << 1U)
#define XREG_FPSCR_IOC (0X00000001U << 0U)
/* MVFR0 bits */
#define XREG_MVFR0_RMODE_BIT (28U)
#define XREG_MVFR0_RMODE_MASK (0x0000000FU << XREG_MVFR0_RMODE_BIT)
#define XREG_MVFR0_SHORT_VEC_BIT (24U)
#define XREG_MVFR0_SHORT_VEC_MASK (0x0000000FU << XREG_MVFR0_SHORT_VEC_BIT)
#define XREG_MVFR0_SQRT_BIT (20U)
#define XREG_MVFR0_SQRT_MASK (0x0000000FU << XREG_MVFR0_SQRT_BIT)
#define XREG_MVFR0_DIVIDE_BIT (16U)
#define XREG_MVFR0_DIVIDE_MASK (0x0000000FU << XREG_MVFR0_DIVIDE_BIT)
#define XREG_MVFR0_EXEC_TRAP_BIT (12U)
#define XREG_MVFR0_EXEC_TRAP_MASK (0x0000000FU << XREG_MVFR0_EXEC_TRAP_BIT)
#define XREG_MVFR0_DP_BIT (8U)
#define XREG_MVFR0_DP_MASK (0x0000000FU << XREG_MVFR0_DP_BIT)
#define XREG_MVFR0_SP_BIT (4U)
#define XREG_MVFR0_SP_MASK (0x0000000FU << XREG_MVFR0_SP_BIT)
#define XREG_MVFR0_A_SIMD_BIT (0U)
#define XREG_MVFR0_A_SIMD_MASK (0x0000000FU << XREG_MVFR0_A_SIMD_BIT)
/* FPEXC bits */
#define XREG_FPEXC_EX (0X00000001U << 31U)
#define XREG_FPEXC_EN (0X00000001U << 30U)
#define XREG_FPEXC_DEX (0X00000001U << 29U)
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* XREG_CORTEXR5_H */
/**
*@endcond
*/
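
The MPU region size, memory-type, and access-permission constants above are intended to be combined and written through the CP15 MPU registers. Below is a hedged sketch only, assuming the mtcp(), dsb(), and isb() helpers from the pseudo-assembly headers; the region number and base address are illustrative.

#include "xreg_cortexr5.h"
#include "xpseudo_asm.h"
#include "xil_types.h"

/* Program MPU region 0 as a 1 MiB normal, write-back/write-allocate,
 * privileged and user read/write region starting at Base. */
static void example_program_mpu_region0(u32 Base)
{
    mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER, 0U);      /* select region 0 */
    mtcp(XREG_CP15_MPU_REG_BASEADDR, Base);         /* region base address */
    mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL,
         NORM_NSHARED_WB_WA | PRIV_RW_USER_RW);     /* memory type and access rights */
    mtcp(XREG_CP15_MPU_REG_SIZE_EN,
         ((u32)REGION_1M << 1) | REGION_EN);        /* size field in bits [5:1], enable in bit 0 */
    dsb();
    isb();
}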

View File

@@ -1 +0,0 @@
/* Intentional blank stub file for Xilinx driver compatibility. */

View File

@@ -1,392 +0,0 @@
/******************************************************************************
* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_cache.h
*
* @addtogroup microblaze_cache_apis Microblaze Cache APIs
* @{
*
*
* The xil_cache.h file contains cache related driver functions (or macros)
* that can be used to access the device. The user should refer to the
* hardware device specification for more details of the device operation.
* The functions in this header file can be used across all Xilinx supported
* processors.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00 hbm 07/28/09 Initial release
* 3.02a sdm 10/24/11 Updated the file to include xparameters.h so that
* the correct cache flush routines are used based on
* whether the write-back or write-through caches are
* used (cr #630532).
* 3.10a asa 05/04/13 This version of MicroBlaze BSP adds support for system
* cache/L2 cache. The existing/old APIs/macros in this
* file are renamed to imply that they deal with L1 cache.
* New macros/APIs are added to address similar features for
* L2 cache. Users can include this file in their application
* to use the various cache related APIs. These changes are
* done for implementing PR #697214.
*
* </pre>
*
*
******************************************************************************/
#ifndef XIL_CACHE_H
#define XIL_CACHE_H
#if defined XENV_VXWORKS
/* VxWorks environment */
#error "Unknown processor / architecture. Must be PPC for VxWorks."
#else
/* standalone environment */
#include "mb_interface.h"
#include "xil_types.h"
#include "xparameters.h"
#ifdef __cplusplus
extern "C" {
#endif
/****************************************************************************/
/**
*
* @brief Invalidate the entire L1 data cache. If the cacheline is modified
* (dirty), the modified contents are lost.
*
*
* @return None.
*
* @note Processor must be in real mode.
****************************************************************************/
#define Xil_L1DCacheInvalidate() microblaze_invalidate_dcache()
/****************************************************************************/
/**
*
* @brief Invalidate the entire L2 data cache. If the cacheline is modified
 * (dirty), the modified contents are lost.
*
* @return None.
*
* @note Processor must be in real mode.
****************************************************************************/
#define Xil_L2CacheInvalidate() microblaze_invalidate_cache_ext()
/****************************************************************************/
/**
*
* @brief Invalidate the L1 data cache for the given address range.
* If the bytes specified by the address (Addr) are cached by the L1
 * data cache, the cacheline containing that byte is invalidated. If
* the cacheline is modified (dirty), the modified contents are lost.
*
* @param Addr is address of range to be invalidated.
* @param Len is the length in bytes to be invalidated.
*
* @return None.
*
* @note Processor must be in real mode.
****************************************************************************/
#define Xil_L1DCacheInvalidateRange(Addr, Len) \
microblaze_invalidate_dcache_range((Addr), (Len))
/****************************************************************************/
/**
*
 * @brief Invalidate the L2 data cache for the given address range.
 * If the bytes specified by the address (Addr) are cached by the
 * L2 data cache, the cacheline containing that byte is invalidated.
 * If the cacheline is modified (dirty), the modified contents are lost.
*
* @param Addr: address of range to be invalidated.
* @param Len: length in bytes to be invalidated.
*
* @return None.
*
* @note Processor must be in real mode.
****************************************************************************/
#define Xil_L2CacheInvalidateRange(Addr, Len) \
microblaze_invalidate_cache_ext_range((Addr), (Len))
/****************************************************************************/
/**
* @brief Flush the L1 data cache for the given address range.
* If the bytes specified by the address (Addr) are cached by the
 * data cache and the cacheline is modified (dirty), the cacheline will be
 * written to system memory. The cacheline will also be invalidated.
*
* @param Addr: the starting address of the range to be flushed.
* @param Len: length in byte to be flushed.
*
* @return None.
*
****************************************************************************/
#if (XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK == 1)
# define Xil_L1DCacheFlushRange(Addr, Len) \
microblaze_flush_dcache_range((Addr), (Len))
#else
# define Xil_L1DCacheFlushRange(Addr, Len) \
microblaze_invalidate_dcache_range((Addr), (Len))
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK */
/****************************************************************************/
/**
* @brief Flush the L2 data cache for the given address range.
* If the bytes specified by the address (Addr) are cached by the
 * data cache and the cacheline is modified (dirty), the cacheline will be
* written to system memory. The cacheline will also be invalidated.
*
* @param Addr: the starting address of the range to be flushed.
* @param Len: length in byte to be flushed.
*
* @return None.
*
****************************************************************************/
#define Xil_L2CacheFlushRange(Addr, Len) \
microblaze_flush_cache_ext_range((Addr), (Len))
/****************************************************************************/
/**
* @brief Flush the entire L1 data cache. If any cacheline is dirty, the
* cacheline will be written to system memory. The entire data cache
* will be invalidated.
*
* @return None.
*
****************************************************************************/
#if (XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK == 1)
# define Xil_L1DCacheFlush() microblaze_flush_dcache()
#else
# define Xil_L1DCacheFlush() microblaze_invalidate_dcache()
#endif /* XPAR_MICROBLAZE_DCACHE_USE_WRITEBACK */
/****************************************************************************/
/**
* @brief Flush the entire L2 data cache. If any cacheline is dirty, the
* cacheline will be written to system memory. The entire data cache
* will be invalidated.
*
* @return None.
*
****************************************************************************/
#define Xil_L2CacheFlush() microblaze_flush_cache_ext()
/****************************************************************************/
/**
*
* @brief Invalidate the instruction cache for the given address range.
*
 * @param Addr is address of range to be invalidated.
* @param Len is the length in bytes to be invalidated.
*
* @return None.
*
****************************************************************************/
#define Xil_L1ICacheInvalidateRange(Addr, Len) \
microblaze_invalidate_icache_range((Addr), (Len))
/****************************************************************************/
/**
*
* @brief Invalidate the entire instruction cache.
*
* @return None.
*
****************************************************************************/
#define Xil_L1ICacheInvalidate() \
microblaze_invalidate_icache()
/****************************************************************************/
/**
*
* @brief Enable the L1 data cache.
*
* @return None.
*
* @note This is processor specific.
*
****************************************************************************/
#define Xil_L1DCacheEnable() \
microblaze_enable_dcache()
/****************************************************************************/
/**
*
* @brief Disable the L1 data cache.
*
* @return None.
*
* @note This is processor specific.
*
****************************************************************************/
#define Xil_L1DCacheDisable() \
microblaze_disable_dcache()
/****************************************************************************/
/**
*
* @brief Enable the instruction cache.
*
* @return None.
*
* @note This is processor specific.
*
****************************************************************************/
#define Xil_L1ICacheEnable() \
microblaze_enable_icache()
/****************************************************************************/
/**
*
* @brief Disable the L1 Instruction cache.
*
* @return None.
*
* @note This is processor specific.
*
****************************************************************************/
#define Xil_L1ICacheDisable() \
microblaze_disable_icache()
/****************************************************************************/
/**
*
* @brief Enable the data cache.
*
* @return None.
*
****************************************************************************/
#define Xil_DCacheEnable() Xil_L1DCacheEnable()
/****************************************************************************/
/**
*
* @brief Enable the instruction cache.
*
* @return None.
*
*
****************************************************************************/
#define Xil_ICacheEnable() Xil_L1ICacheEnable()
/****************************************************************************/
/**
*
* @brief Invalidate the entire Data cache.
*
* @return None.
*
****************************************************************************/
#define Xil_DCacheInvalidate() \
Xil_L2CacheInvalidate(); \
Xil_L1DCacheInvalidate();
/****************************************************************************/
/**
*
* @brief Invalidate the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the
* Data cache, the cacheline containing that byte is invalidated.
* If the cacheline is modified (dirty), the modified contents are
* lost and are NOT written to system memory before the line is
* invalidated.
*
* @param Addr: Start address of range to be invalidated.
* @param Len: Length of range to be invalidated in bytes.
*
* @return None.
*
****************************************************************************/
#define Xil_DCacheInvalidateRange(Addr, Len) \
Xil_L2CacheInvalidateRange((Addr), (Len)); \
Xil_L1DCacheInvalidateRange((Addr), (Len));
/****************************************************************************/
/**
*
* @brief Flush the entire Data cache.
*
* @return None.
*
****************************************************************************/
#define Xil_DCacheFlush() \
Xil_L2CacheFlush(); \
Xil_L1DCacheFlush();
/****************************************************************************/
/**
* @brief Flush the Data cache for the given address range.
* If the bytes specified by the address (adr) are cached by the
* Data cache, the cacheline containing that byte is invalidated.
 * If the cacheline is modified (dirty), it is written to system
 * memory before the line is invalidated.
*
* @param Addr: Start address of range to be flushed.
* @param Len: Length of range to be flushed in bytes.
*
* @return None.
*
****************************************************************************/
#define Xil_DCacheFlushRange(Addr, Len) \
Xil_L2CacheFlushRange((Addr), (Len)); \
Xil_L1DCacheFlushRange((Addr), (Len));
/****************************************************************************/
/**
* @brief Invalidate the entire instruction cache.
*
* @return None.
*
****************************************************************************/
#define Xil_ICacheInvalidate() \
Xil_L2CacheInvalidate(); \
Xil_L1ICacheInvalidate();
/****************************************************************************/
/**
* @brief Invalidate the instruction cache for the given address range.
 * If the bytes specified by the address (adr) are cached by the
 * L2 cache or the L1 instruction cache, the cacheline containing
 * that byte is invalidated. If the cacheline is modified (dirty),
 * the modified contents are lost and are NOT written to system
 * memory before the line is invalidated.
*
 * @param Addr: Start address of range to be invalidated.
* @param Len: Length of range to be invalidated in bytes.
*
* @return None.
*
****************************************************************************/
#define Xil_ICacheInvalidateRange(Addr, Len) \
Xil_L2CacheInvalidateRange((Addr), (Len)); \
Xil_L1ICacheInvalidateRange((Addr), (Len));
void Xil_DCacheDisable(void);
void Xil_ICacheDisable(void);
#ifdef __cplusplus
}
#endif
#endif
#endif
/**
* @} End of "addtogroup microblaze_cache_apis".
*/
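
A hedged sketch of the range-based macros above as they are commonly used around a DMA transfer follows; the buffer, its size, and the transfer steps are illustrative.

#include "xil_cache.h"
#include "xil_types.h"

#define EXAMPLE_BUF_LEN 256U
static u8 ExampleDmaBuffer[EXAMPLE_BUF_LEN];

void example_dma_buffer_maintenance(void)
{
    /* The CPU fills the buffer, then flushes it so the DMA engine
     * reads up-to-date data from memory. */
    ExampleDmaBuffer[0] = 0xA5U;
    Xil_DCacheFlushRange((UINTPTR)ExampleDmaBuffer, EXAMPLE_BUF_LEN);

    /* ... start the transfer and wait for the device to write its response ... */

    /* Drop any stale cached copies before the CPU reads what the device wrote. */
    Xil_DCacheInvalidateRange((UINTPTR)ExampleDmaBuffer, EXAMPLE_BUF_LEN);
}

Note that several of these macros expand to two statements, so they should not be used as the sole body of an unbraced if or loop.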

View File

@@ -1,112 +0,0 @@
/******************************************************************************
* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_exception.h
*
* @addtogroup microblaze_exception_apis Microblaze Exception APIs
* @{
*
 * The xil_exception.h file, available in the <install-directory>/src/microblaze folder,
 * contains MicroBlaze-specific exception-related APIs and macros. Application programs
 * can use these APIs for various exception-related operations, for example enabling
 * exceptions, disabling exceptions, and registering exception handlers.
 *
 * @note To use the exception-related functions, xil_exception.h must be included in the
 * source code.
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00 hbm 07/28/09 Initial release
*
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_EXCEPTION_H /* prevent circular inclusions */
#define XIL_EXCEPTION_H /* by using protection macros */
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/************************** Constant Definitions *****************************/
/*
* These constants are specific to Microblaze processor.
*/
#define XIL_EXCEPTION_ID_FIRST 0U
#define XIL_EXCEPTION_ID_FSL 0U
#define XIL_EXCEPTION_ID_UNALIGNED_ACCESS 1U
#define XIL_EXCEPTION_ID_ILLEGAL_OPCODE 2U
#define XIL_EXCEPTION_ID_M_AXI_I_EXCEPTION 3U
#define XIL_EXCEPTION_ID_IPLB_EXCEPTION 3U
#define XIL_EXCEPTION_ID_M_AXI_D_EXCEPTION 4U
#define XIL_EXCEPTION_ID_DPLB_EXCEPTION 4U
#define XIL_EXCEPTION_ID_DIV_BY_ZERO 5U
#define XIL_EXCEPTION_ID_FPU 6U
#define XIL_EXCEPTION_ID_STACK_VIOLATION 7U
#define XIL_EXCEPTION_ID_MMU 7U
#define XIL_EXCEPTION_ID_LAST XIL_EXCEPTION_ID_MMU
/*
* XIL_EXCEPTION_ID_INT is defined for all processors, but with different value.
*/
#define XIL_EXCEPTION_ID_INT 16U /**
* exception ID for interrupt
*/
/**************************** Type Definitions *******************************/
/**
* This typedef is the exception handler function.
*/
typedef void (*Xil_ExceptionHandler)(void *Data);
/**
* This data type defines an interrupt handler for a device.
* The argument points to the instance of the component
*/
typedef void (*XInterruptHandler) (void *InstancePtr);
/***************** Macros (Inline Functions) Definitions *********************/
/************************** Function Prototypes ******************************/
extern void Xil_ExceptionRegisterHandler(u32 Id,
Xil_ExceptionHandler Handler,
void *Data);
extern void Xil_ExceptionRemoveHandler(u32 Id);
extern void Xil_ExceptionInit(void);
extern void Xil_ExceptionEnable(void);
extern void Xil_ExceptionDisable(void);
#ifdef __cplusplus
}
#endif
#endif
/**
*@endcond
*/
/**
* @} End of "addtogroup microblaze_exception_apis".
*/
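
For reference, a minimal sketch of registering an interrupt handler through this API; the handler body and the NULL callback argument are placeholders.

#include "xil_exception.h"
#include "xil_types.h"

static void example_interrupt_handler(void *Data)
{
    (void)Data; /* would normally be the interrupt controller or device instance */
}

void example_setup_exceptions(void)
{
    Xil_ExceptionInit();
    Xil_ExceptionRegisterHandler(XIL_EXCEPTION_ID_INT,
                                 example_interrupt_handler,
                                 NULL);
    Xil_ExceptionEnable();
}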

View File

@@ -1,37 +0,0 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/*
* COPYRIGHT (c) 2023.
* On-Line Applications Research Corporation (OAR).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_SYSTEM_H
#define LIBBSP_SHARED_XIL_SYSTEM_H
/*
* This file defines anything necessary for the Xilinx support infrastructure to
* function properly on a particular platform.
*/
#endif

View File

@@ -1 +0,0 @@
#include <sys/unistd.h>

View File

@@ -1,113 +0,0 @@
/******************************************************************************
* Copyright (c) 2010 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xbasic_types.h
*
*
* @note Dummy File for backwards compatibility
*
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00a adk 1/31/14 Added in bsp common folder for backward compatibility
* 7.0 aru 01/21/19 Modified the typedef of u32,u16,u8
* 7.0 aru 02/06/19 Included stdint.h and stddef.h
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XBASIC_TYPES_H /* prevent circular inclusions */
#define XBASIC_TYPES_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stddef.h>
/** @name Legacy types
* Deprecated legacy types.
* @{
*/
typedef uint8_t Xuint8; /**< unsigned 8-bit */
typedef char Xint8; /**< signed 8-bit */
typedef uint16_t Xuint16; /**< unsigned 16-bit */
typedef short Xint16; /**< signed 16-bit */
typedef uint32_t Xuint32; /**< unsigned 32-bit */
typedef long Xint32; /**< signed 32-bit */
typedef float Xfloat32; /**< 32-bit floating point */
typedef double Xfloat64; /**< 64-bit double precision FP */
typedef unsigned long Xboolean; /**< boolean (XTRUE or XFALSE) */
#if !defined __XUINT64__
typedef struct
{
Xuint32 Upper;
Xuint32 Lower;
} Xuint64;
#endif
/** @name New types
* New simple types.
* @{
*/
#ifndef __KERNEL__
#ifndef XIL_TYPES_H
typedef Xuint32 u32;
typedef Xuint16 u16;
typedef Xuint8 u8;
#endif
#else
#include <linux/types.h>
#endif
#ifndef TRUE
# define TRUE 1U
#endif
#ifndef FALSE
# define FALSE 0U
#endif
#ifndef NULL
#define NULL 0U
#endif
/*
* Xilinx NULL, TRUE and FALSE legacy support. Deprecated.
* Please use NULL, TRUE and FALSE
*/
#define XNULL NULL
#define XTRUE TRUE
#define XFALSE FALSE
/*
 * This file is deprecated and users
 * should use xil_types.h and xil_assert.h
 */
#warning The xbasic_types.h file is deprecated and users should use xil_types.h and xil_assert.h
#warning Please refer to the Standalone BSP UG647 for further details
#ifdef __cplusplus
}
#endif
#endif /* end of protection macro */
/**
*@endcond
*/

View File

@@ -1,2 +0,0 @@
/* Minimal stub file for Xilinx driver compatibility. */
#define xdbg_printf(...)

View File

@@ -1,176 +0,0 @@
/******************************************************************************
* Copyright (c) 2009 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_assert.h
*
* @addtogroup common_assert_apis Assert APIs and Macros
*
* The xil_assert.h file contains assert related functions and macros.
 * Assert APIs/Macros specify that an application program satisfies certain
 * conditions at particular points in its execution. These functions can be
 * used by application programs to ensure that application code satisfies
 * certain conditions.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00a hbm 07/14/09 First release
 * 6.0   kvn  05/31/16 Make Xil_AssertWait a global variable
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_ASSERT_H /* prevent circular inclusions */
#define XIL_ASSERT_H /* by using protection macros */
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/***************************** Include Files *********************************/
/************************** Constant Definitions *****************************/
#define XIL_ASSERT_NONE 0U
#define XIL_ASSERT_OCCURRED 1U
#define XNULL NULL
extern u32 Xil_AssertStatus;
extern s32 Xil_AssertWait;
extern void Xil_Assert(const char8 *File, s32 Line);
/**
*@endcond
*/
void XNullHandler(void *NullParameter);
/**
* This data type defines a callback to be invoked when an
* assert occurs. The callback is invoked only when asserts are enabled
*/
typedef void (*Xil_AssertCallback) (const char8 *File, s32 Line);
/***************** Macros (Inline Functions) Definitions *********************/
#ifndef NDEBUG
/*****************************************************************************/
/**
* @brief This assert macro is to be used for void functions. This in
* conjunction with the Xil_AssertWait boolean can be used to
* accommodate tests so that asserts which fail allow execution to
* continue.
*
* @param Expression: expression to be evaluated. If it evaluates to
* false, the assert occurs.
*
* @return Returns void unless the Xil_AssertWait variable is true, in which
* case no return is made and an infinite loop is entered.
*
******************************************************************************/
#define Xil_AssertVoid(Expression) \
{ \
if (Expression) { \
Xil_AssertStatus = XIL_ASSERT_NONE; \
} else { \
Xil_Assert(__FILE__, __LINE__); \
Xil_AssertStatus = XIL_ASSERT_OCCURRED; \
return; \
} \
}
/*****************************************************************************/
/**
* @brief This assert macro is to be used for functions that do return a
* value. This in conjunction with the Xil_AssertWait boolean can be
* used to accommodate tests so that asserts which fail allow execution
* to continue.
*
* @param Expression: expression to be evaluated. If it evaluates to false,
* the assert occurs.
*
* @return Returns 0 unless the Xil_AssertWait variable is true, in which
* case no return is made and an infinite loop is entered.
*
******************************************************************************/
#define Xil_AssertNonvoid(Expression) \
{ \
if (Expression) { \
Xil_AssertStatus = XIL_ASSERT_NONE; \
} else { \
Xil_Assert(__FILE__, __LINE__); \
Xil_AssertStatus = XIL_ASSERT_OCCURRED; \
return 0; \
} \
}
/*****************************************************************************/
/**
* @brief Always assert. This assert macro is to be used for void functions.
* Use for instances where an assert should always occur.
*
* @return Returns void unless the Xil_AssertWait variable is true, in which
* case no return is made and an infinite loop is entered.
*
******************************************************************************/
#define Xil_AssertVoidAlways() \
{ \
Xil_Assert(__FILE__, __LINE__); \
Xil_AssertStatus = XIL_ASSERT_OCCURRED; \
return; \
}
/*****************************************************************************/
/**
* @brief Always assert. This assert macro is to be used for functions that
* do return a value. Use for instances where an assert should always
* occur.
*
 * @return Returns 0 unless the Xil_AssertWait variable is true, in which
 * case no return is made and an infinite loop is entered.
*
******************************************************************************/
#define Xil_AssertNonvoidAlways() \
{ \
Xil_Assert(__FILE__, __LINE__); \
Xil_AssertStatus = XIL_ASSERT_OCCURRED; \
return 0; \
}
#else
#define Xil_AssertVoid(Expression)
#define Xil_AssertVoidAlways()
#define Xil_AssertNonvoid(Expression)
#define Xil_AssertNonvoidAlways()
#endif
/************************** Function Prototypes ******************************/
void Xil_AssertSetCallback(Xil_AssertCallback Routine);
#ifdef __cplusplus
}
#endif
#endif /* end of protection macro */
/**
* @} End of "addtogroup common_assert_apis".
*/
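
A short, hedged sketch of how a driver routine typically uses these macros, together with an application-installed assert callback; all names are illustrative.

#include "xil_assert.h"
#include "xil_types.h"

static const char8 *LastAssertFile;
static s32 LastAssertLine;

/* Callback invoked on a failed assert when asserts are enabled. */
static void example_assert_callback(const char8 *File, s32 Line)
{
    LastAssertFile = File;
    LastAssertLine = Line;
}

void example_install_assert_callback(void)
{
    Xil_AssertSetCallback(example_assert_callback);
}

/* Non-void routine: Xil_AssertNonvoid() returns 0 on a failed check. */
s32 example_driver_set_value(u32 *InstancePtr, u32 Value)
{
    Xil_AssertNonvoid(InstancePtr != NULL);
    *InstancePtr = Value;
    return 0;
}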

View File

@@ -1,412 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_io.h
*
* @addtogroup common_io_interfacing_apis Register IO interfacing APIs
*
* The xil_io.h file contains the interface for the general I/O component, which
* encapsulates the Input/Output functions for the processors that do not
* require any special I/O handling.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.00 pkp 05/29/14 First release
* 6.00 mus 08/19/16 Remove checking of __LITTLE_ENDIAN__ flag for
* ARM processors
* 7.20 har 01/03/20 Added Xil_SecureOut32 for avoiding blindwrite for
* CR-1049218
* 7.30 kpt 09/21/20 Moved Xil_EndianSwap16 and Xil_EndianSwap32 to
* xil_io.h and made them as static inline
* am 10/13/20 Changed the return type of Xil_SecureOut32 function
* from u32 to int
* 7.50 dp 02/12/21 Fix compilation error in Xil_EndianSwap32() that occur
* when -Werror=conversion compiler flag is enabled
* 7.5 mus 05/17/21 Update the functions with comments. It fixes CR#1067739.
*
* </pre>
******************************************************************************/
#ifndef XIL_IO_H /* prevent circular inclusions */
#define XIL_IO_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
/***************************** Include Files *********************************/
#include "xil_types.h"
#include "xil_printf.h"
#include "xstatus.h"
#if defined (__MICROBLAZE__)
#include "mb_interface.h"
#else
#include "xpseudo_asm.h"
#endif
/************************** Function Prototypes ******************************/
#ifdef ENABLE_SAFETY
extern u32 XStl_RegUpdate(u32 RegAddr, u32 RegVal);
#endif
/***************** Macros (Inline Functions) Definitions *********************/
#if defined __GNUC__
#if defined (__MICROBLAZE__)
# define INST_SYNC mbar(0)
# define DATA_SYNC mbar(1)
# else
# define SYNCHRONIZE_IO dmb()
# define INST_SYNC isb()
# define DATA_SYNC dsb()
# endif
#else
# define SYNCHRONIZE_IO
# define INST_SYNC
# define DATA_SYNC
#endif
#if defined (__GNUC__) || defined (__ICCARM__) || defined (__MICROBLAZE__)
#define INLINE inline
#else
#define INLINE __inline
#endif
/*****************************************************************************/
/**
*
* @brief Performs an input operation for a memory location by reading
* from the specified address and returning the 8 bit Value read from
* that address.
*
* @param Addr: contains the address to perform the input operation
*
* @return The 8 bit Value read from the specified input address.
*
******************************************************************************/
static INLINE u8 Xil_In8(UINTPTR Addr)
{
return *(volatile u8 *) Addr;
}
/*****************************************************************************/
/**
*
* @brief Performs an input operation for a memory location by reading from
* the specified address and returning the 16 bit Value read from that
* address.
*
* @param Addr: contains the address to perform the input operation
*
* @return The 16 bit Value read from the specified input address.
*
******************************************************************************/
static INLINE u16 Xil_In16(UINTPTR Addr)
{
return *(volatile u16 *) Addr;
}
/*****************************************************************************/
/**
*
* @brief Performs an input operation for a memory location by
* reading from the specified address and returning the 32 bit Value
* read from that address.
*
* @param Addr: contains the address to perform the input operation
*
* @return The 32 bit Value read from the specified input address.
*
******************************************************************************/
static INLINE u32 Xil_In32(UINTPTR Addr)
{
return *(volatile u32 *) Addr;
}
/*****************************************************************************/
/**
*
 * @brief Performs an input operation for a memory location by reading from
 * the specified address and returning the 64 bit Value read from that
 * address.
*
*
* @param Addr: contains the address to perform the input operation
*
* @return The 64 bit Value read from the specified input address.
*
******************************************************************************/
static INLINE u64 Xil_In64(UINTPTR Addr)
{
return *(volatile u64 *) Addr;
}
/*****************************************************************************/
/**
*
 * @brief Performs an output operation for a memory location by
 * writing the 8 bit Value to the specified address.
*
* @param Addr: contains the address to perform the output operation
* @param Value: contains the 8 bit Value to be written at the specified
* address.
*
* @return None.
*
******************************************************************************/
static INLINE void Xil_Out8(UINTPTR Addr, u8 Value)
{
/* write 8 bit value to specified address */
volatile u8 *LocalAddr = (volatile u8 *)Addr;
*LocalAddr = Value;
}
/*****************************************************************************/
/**
*
* @brief Performs an output operation for a memory location by writing the
 * 16 bit Value to the specified address.
*
* @param Addr contains the address to perform the output operation
* @param Value contains the Value to be written at the specified address.
*
* @return None.
*
******************************************************************************/
static INLINE void Xil_Out16(UINTPTR Addr, u16 Value)
{
/* write 16 bit value to specified address */
volatile u16 *LocalAddr = (volatile u16 *)Addr;
*LocalAddr = Value;
}
/*****************************************************************************/
/**
*
* @brief Performs an output operation for a memory location by writing the
 * 32 bit Value to the specified address.
*
* @param Addr contains the address to perform the output operation
* @param Value contains the 32 bit Value to be written at the specified
* address.
*
* @return None.
*
******************************************************************************/
static INLINE void Xil_Out32(UINTPTR Addr, u32 Value)
{
/* write 32 bit value to specified address */
#ifndef ENABLE_SAFETY
volatile u32 *LocalAddr = (volatile u32 *)Addr;
*LocalAddr = Value;
#else
XStl_RegUpdate(Addr, Value);
#endif
}
/*****************************************************************************/
/**
*
* @brief Performs an output operation for a memory location by writing the
 * 64 bit Value to the specified address.
*
* @param Addr contains the address to perform the output operation
* @param Value contains 64 bit Value to be written at the specified address.
*
* @return None.
*
******************************************************************************/
static INLINE void Xil_Out64(UINTPTR Addr, u64 Value)
{
/* write 64 bit value to specified address */
volatile u64 *LocalAddr = (volatile u64 *)Addr;
*LocalAddr = Value;
}
/*****************************************************************************/
/**
*
* @brief Performs an output operation for a memory location by writing the
 * 32 bit Value to the specified address and then reading it
* back to verify the value written in the register.
*
* @param Addr contains the address to perform the output operation
* @param Value contains 32 bit Value to be written at the specified address
*
* @return Returns Status
* - XST_SUCCESS on success
* - XST_FAILURE on failure
*
*****************************************************************************/
static INLINE int Xil_SecureOut32(UINTPTR Addr, u32 Value)
{
int Status = XST_FAILURE;
u32 ReadReg;
u32 ReadRegTemp;
/* writing 32 bit value to specified address */
Xil_Out32(Addr, Value);
/* verify value written to specified address with multiple reads */
ReadReg = Xil_In32(Addr);
ReadRegTemp = Xil_In32(Addr);
if( (ReadReg == Value) && (ReadRegTemp == Value) ) {
Status = XST_SUCCESS;
}
return Status;
}
/*****************************************************************************/
/**
*
* @brief Perform a 16-bit endian conversion.
*
* @param Data: 16 bit value to be converted
*
* @return 16 bit Data with converted endianness
*
******************************************************************************/
static INLINE __attribute__((always_inline)) u16 Xil_EndianSwap16(u16 Data)
{
return (u16) (((Data & 0xFF00U) >> 8U) | ((Data & 0x00FFU) << 8U));
}
/*****************************************************************************/
/**
*
* @brief Perform a 32-bit endian conversion.
*
* @param Data: 32 bit value to be converted
*
* @return 32 bit data with converted endianness
*
******************************************************************************/
static INLINE __attribute__((always_inline)) u32 Xil_EndianSwap32(u32 Data)
{
u16 LoWord;
u16 HiWord;
/* get each of the half words from the 32 bit word */
LoWord = (u16) (Data & 0x0000FFFFU);
HiWord = (u16) ((Data & 0xFFFF0000U) >> 16U);
/* byte swap each of the 16 bit half words */
LoWord = (u16)(((LoWord & 0xFF00U) >> 8U) | ((LoWord & 0x00FFU) << 8U));
HiWord = (u16)(((HiWord & 0xFF00U) >> 8U) | ((HiWord & 0x00FFU) << 8U));
/* swap the half words before returning the value */
return ((((u32)LoWord) << (u32)16U) | (u32)HiWord);
}
#if defined (__MICROBLAZE__)
#ifdef __LITTLE_ENDIAN__
# define Xil_In16LE Xil_In16
# define Xil_In32LE Xil_In32
# define Xil_Out16LE Xil_Out16
# define Xil_Out32LE Xil_Out32
# define Xil_Htons Xil_EndianSwap16
# define Xil_Htonl Xil_EndianSwap32
# define Xil_Ntohs Xil_EndianSwap16
# define Xil_Ntohl Xil_EndianSwap32
# else
# define Xil_In16BE Xil_In16
# define Xil_In32BE Xil_In32
# define Xil_Out16BE Xil_Out16
# define Xil_Out32BE Xil_Out32
# define Xil_Htons(Data) (Data)
# define Xil_Htonl(Data) (Data)
# define Xil_Ntohs(Data) (Data)
# define Xil_Ntohl(Data) (Data)
#endif
#else
# define Xil_In16LE Xil_In16
# define Xil_In32LE Xil_In32
# define Xil_Out16LE Xil_Out16
# define Xil_Out32LE Xil_Out32
# define Xil_Htons Xil_EndianSwap16
# define Xil_Htonl Xil_EndianSwap32
# define Xil_Ntohs Xil_EndianSwap16
# define Xil_Ntohl Xil_EndianSwap32
#endif
#if defined (__MICROBLAZE__)
#ifdef __LITTLE_ENDIAN__
static INLINE u16 Xil_In16BE(UINTPTR Addr)
#else
static INLINE u16 Xil_In16LE(UINTPTR Addr)
#endif
#else
static INLINE u16 Xil_In16BE(UINTPTR Addr)
#endif
{
u16 value = Xil_In16(Addr);
return Xil_EndianSwap16(value);
}
#if defined (__MICROBLAZE__)
#ifdef __LITTLE_ENDIAN__
static INLINE u32 Xil_In32BE(UINTPTR Addr)
#else
static INLINE u32 Xil_In32LE(UINTPTR Addr)
#endif
#else
static INLINE u32 Xil_In32BE(UINTPTR Addr)
#endif
{
u32 value = Xil_In32(Addr);
return Xil_EndianSwap32(value);
}
#if defined (__MICROBLAZE__)
#ifdef __LITTLE_ENDIAN__
static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
#else
static INLINE void Xil_Out16LE(UINTPTR Addr, u16 Value)
#endif
#else
static INLINE void Xil_Out16BE(UINTPTR Addr, u16 Value)
#endif
{
Value = Xil_EndianSwap16(Value);
Xil_Out16(Addr, Value);
}
#if defined (__MICROBLAZE__)
#ifdef __LITTLE_ENDIAN__
static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
#else
static INLINE void Xil_Out32LE(UINTPTR Addr, u32 Value)
#endif
#else
static INLINE void Xil_Out32BE(UINTPTR Addr, u32 Value)
#endif
{
Value = Xil_EndianSwap32(Value);
Xil_Out32(Addr, Value);
}
#ifdef __cplusplus
}
#endif
#endif /* end of protection macro */
/**
* @} End of "addtogroup common_io_interfacing_apis".
*/
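
A hedged sketch of a typical read-modify-write sequence built on these accessors; the register address and bit mask are hypothetical.

#include "xil_io.h"

#define EXAMPLE_CTRL_REG   0xF0000000U  /* hypothetical device register address */
#define EXAMPLE_ENABLE_BIT 0x00000001U  /* hypothetical enable bit */

int example_enable_device(void)
{
    u32 Reg = Xil_In32(EXAMPLE_CTRL_REG);          /* read the current register value */

    Reg |= EXAMPLE_ENABLE_BIT;                     /* set the enable bit */
    return Xil_SecureOut32(EXAMPLE_CTRL_REG, Reg); /* write and verify by reading back */
}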

View File

@@ -1,47 +0,0 @@
/******************************************************************************/
/**
* Copyright (c) 2015 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/****************************************************************************/
/**
* @file xil_mem.h
*
* @addtogroup common_mem_operation_api Customized APIs for Memory Operations
*
* The xil_mem.h file contains prototype for functions related
* to memory operations. These APIs are applicable for all processors supported
* by Xilinx.
*
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 6.1 nsk 11/07/16 First release.
* 7.0 mus 01/07/19 Add cpp extern macro
*
* </pre>
*
*****************************************************************************/
#ifndef XIL_MEM_H /* prevent circular inclusions */
#define XIL_MEM_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
/************************** Function Prototypes *****************************/
void Xil_MemCpy(void* dst, const void* src, u32 cnt);
#ifdef __cplusplus
}
#endif
#endif /* XIL_MEM_H */
/**
* @} End of "addtogroup common_mem_operation_api".
*/

View File

@@ -1,44 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2022 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XIL_PRINTF_H
#define XIL_PRINTF_H
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#define xil_printf(args...) printf(args)
#define print(args...) printf(args)
#ifdef __cplusplus
}
#endif
#endif /* XIL_PRINTF_H */

View File

@@ -1 +0,0 @@
/* Intentional blank stub file for Xilinx driver compatibility. */

View File

@@ -1,203 +0,0 @@
/******************************************************************************
* Copyright (c) 2010 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_types.h
*
* @addtogroup common_types Basic Data types for Xilinx&reg; Software IP
*
* The xil_types.h file contains basic types for Xilinx software IP. These data types
* are applicable for all processors supported by Xilinx.
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00a hbm 07/14/09 First release
* 3.03a sdm 05/30/11 Added Xuint64 typedef and XUINT64_MSW/XUINT64_LSW macros
* 5.00 pkp 05/29/14 Made changes for 64 bit architecture
* srt 07/14/14 Use standard definitions from stdint.h and stddef.h
* Define LONG and ULONG datatypes and mask values
* 7.00 mus 01/07/19 Add cpp extern macro
* 7.1 aru 08/19/19 Shift the value in UPPER_32_BITS only if it
* is 64-bit processor
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XIL_TYPES_H /* prevent circular inclusions */
#define XIL_TYPES_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stddef.h>
/************************** Constant Definitions *****************************/
#ifndef TRUE
# define TRUE 1U
#endif
#ifndef FALSE
# define FALSE 0U
#endif
#ifndef NULL
#define NULL 0U
#endif
#define XIL_COMPONENT_IS_READY 0x11111111U /**< In device drivers, this macro will be
                                                assigned to the "IsReady" member of the driver
                                                instance to indicate that the driver
                                                instance is initialized and ready to use. */
#define XIL_COMPONENT_IS_STARTED 0x22222222U /**< In device drivers, this macro will be assigned to
                                                the "IsStarted" member of the driver instance
                                                to indicate that the driver instance is
                                                started and can be enabled. */
/* @name New types
* New simple types.
* @{
*/
#ifndef __KERNEL__
#ifndef XBASIC_TYPES_H
/*
* guarded against xbasic_types.h.
*/
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
/** @}*/
#define __XUINT64__
typedef struct
{
u32 Upper;
u32 Lower;
} Xuint64;
/*****************************************************************************/
/**
* @brief Return the most significant half of the 64 bit data type.
*
* @param x is the 64 bit word.
*
* @return The upper 32 bits of the 64 bit word.
*
******************************************************************************/
#define XUINT64_MSW(x) ((x).Upper)
/*****************************************************************************/
/**
* @brief Return the least significant half of the 64 bit data type.
*
* @param x is the 64 bit word.
*
* @return The lower 32 bits of the 64 bit word.
*
******************************************************************************/
#define XUINT64_LSW(x) ((x).Lower)
#endif /* XBASIC_TYPES_H */
/*
* xbasic_types.h does not typedef s* or u64
*/
/** @{ */
typedef char char8;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
typedef uint64_t u64;
typedef int sint32;
typedef intptr_t INTPTR;
typedef uintptr_t UINTPTR;
typedef ptrdiff_t PTRDIFF;
/** @}*/
#if !defined(LONG) || !defined(ULONG)
typedef long LONG;
typedef unsigned long ULONG;
#endif
#define ULONG64_HI_MASK 0xFFFFFFFF00000000U
#define ULONG64_LO_MASK ~ULONG64_HI_MASK
#else
#include <linux/types.h>
#endif
/** @{ */
/**
* This data type defines an interrupt handler for a device.
* The argument points to the instance of the component
*/
typedef void (*XInterruptHandler) (void *InstancePtr);
/**
* This data type defines an exception handler for a processor.
* The argument points to the instance of the component
*/
typedef void (*XExceptionHandler) (void *InstancePtr);
/**
* @brief Returns 32-63 bits of a number.
* @param n : Number being accessed.
* @return Bits 32-63 of number.
*
* @note A basic shift-right of a 64- or 32-bit quantity.
* Use this to suppress the "right shift count >= width of type"
* warning when that quantity is 32-bits.
*/
#if defined (__aarch64__) || defined (__arch64__)
#define UPPER_32_BITS(n) ((u32)(((n) >> 16) >> 16))
#else
#define UPPER_32_BITS(n) 0U
#endif
/**
* @brief Returns 0-31 bits of a number
* @param n : Number being accessed.
* @return Bits 0-31 of number
*/
#define LOWER_32_BITS(n) ((u32)(n))
/************************** Constant Definitions *****************************/
#ifndef TRUE
#define TRUE 1U
#endif
#ifndef FALSE
#define FALSE 0U
#endif
#ifndef NULL
#define NULL 0U
#endif
#ifdef __cplusplus
}
#endif
#endif /* end of protection macro */
/**
*@endcond
*/
/**
* @} End of "addtogroup common_types".
*/
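
For context, the types and helpers above are consumed by drivers that need to split a 64-bit quantity into the two 32-bit halves a register pair expects. The following is a minimal sketch of that pattern, not code taken from any Xilinx driver; the function and register names are invented for illustration.

#include "xil_types.h"

/* Sketch: program a 64-bit buffer address into a low/high register pair. */
static void write_addr_pair(volatile u32 *reg_lo, volatile u32 *reg_hi, u64 addr)
{
    *reg_lo = LOWER_32_BITS(addr);  /* bits 0-31 */
    *reg_hi = UPPER_32_BITS(addr);  /* bits 32-63 on 64-bit targets, 0 otherwise */
}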

View File

@@ -1,44 +0,0 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (C) 2022 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef XPARAMETERS_H
#define XPARAMETERS_H
#ifdef __cplusplus
extern "C" {
#endif
#include <bspopts.h>
#define EL3 1
#define EL1_NONSECURE 0
#ifdef __cplusplus
}
#endif
#endif /* XPARAMETERS_H */
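
EL3 and EL1_NONSECURE are the only parameters this xparameters.h kept; Xilinx sources use them to select exception-level-specific behavior at compile time. A minimal, purely illustrative use of such a guard (the function below is hypothetical and not part of the removed file):

#include "xparameters.h"

/* Hypothetical helper: report which exception-level configuration was built. */
static const char *xil_el_config(void)
{
#if EL3
    return "EL3 (secure monitor) configuration";
#else
    return "EL1 non-secure configuration";
#endif
}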

View File

@@ -1,240 +0,0 @@
/******************************************************************************
* Copyright (c) 2014 - 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xpseudo_asm_gcc.h
*
* This header file contains macros for using inline assembler code. It is
* written specifically for the GNU compiler.
*
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- -------- -------- -----------------------------------------------
* 5.00 pkp 05/21/14 First release
* 6.0 mus 07/27/16 Consolidated file for a53,a9 and r5 processors
* 7.2 asa 04/03/20 Renamed the str macro to strw.
* 7.2 dp 04/30/20 Added clobber "cc" to mtcpsr for aarch32 processors
* </pre>
*
******************************************************************************/
/**
*@cond nocomments
*/
#ifndef XPSEUDO_ASM_GCC_H /* prevent circular inclusions */
#define XPSEUDO_ASM_GCC_H /* by using protection macros */
/***************************** Include Files ********************************/
#include "xil_types.h"
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
/************************** Constant Definitions ****************************/
/**************************** Type Definitions ******************************/
/***************** Macros (Inline Functions) Definitions ********************/
/* necessary for pre-processor */
#define stringify(s) tostring(s)
#define tostring(s) #s
#if defined (__aarch64__)
/* pseudo assembler instructions */
#define mfcpsr() ({u32 rval = 0U; \
asm volatile("mrs %0, DAIF" : "=r" (rval));\
rval;\
})
#define mtcpsr(v) __asm__ __volatile__ ("msr DAIF, %0" : : "r" (v))
#define cpsiei() //__asm__ __volatile__("cpsie i\n")
#define cpsidi() //__asm__ __volatile__("cpsid i\n")
#define cpsief() //__asm__ __volatile__("cpsie f\n")
#define cpsidf() //__asm__ __volatile__("cpsid f\n")
#define mtgpr(rn, v) /*__asm__ __volatile__(\
"mov r" stringify(rn) ", %0 \n"\
: : "r" (v)\
)*/
#define mfgpr(rn) /*({u32 rval; \
__asm__ __volatile__(\
"mov %0,r" stringify(rn) "\n"\
: "=r" (rval)\
);\
rval;\
})*/
/* memory synchronization operations */
/* Instruction Synchronization Barrier */
#define isb() __asm__ __volatile__ ("isb sy")
/* Data Synchronization Barrier */
#define dsb() __asm__ __volatile__("dsb sy")
/* Data Memory Barrier */
#define dmb() __asm__ __volatile__("dmb sy")
/* Memory Operations */
#define ldr(adr) ({u64 rval; \
__asm__ __volatile__(\
"ldr %0,[%1]"\
: "=r" (rval) : "r" (adr)\
);\
rval;\
})
#define mfelrel3() ({u64 rval = 0U; \
asm volatile("mrs %0, ELR_EL3" : "=r" (rval));\
rval;\
})
#define mtelrel3(v) __asm__ __volatile__ ("msr ELR_EL3, %0" : : "r" (v))
#else
/* pseudo assembler instructions */
#define mfcpsr() ({u32 rval = 0U; \
__asm__ __volatile__(\
"mrs %0, cpsr\n"\
: "=r" (rval)\
);\
rval;\
})
#define mtcpsr(v) __asm__ __volatile__(\
"msr cpsr,%0\n"\
: : "r" (v) : "cc" \
)
#define cpsiei() __asm__ __volatile__("cpsie i\n")
#define cpsidi() __asm__ __volatile__("cpsid i\n")
#define cpsief() __asm__ __volatile__("cpsie f\n")
#define cpsidf() __asm__ __volatile__("cpsid f\n")
#define mtgpr(rn, v) __asm__ __volatile__(\
"mov r" stringify(rn) ", %0 \n"\
: : "r" (v)\
)
#define mfgpr(rn) ({u32 rval; \
__asm__ __volatile__(\
"mov %0,r" stringify(rn) "\n"\
: "=r" (rval)\
);\
rval;\
})
/* memory synchronization operations */
/* Instruction Synchronization Barrier */
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
/* Data Synchronization Barrier */
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
/* Data Memory Barrier */
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
/* Memory Operations */
#define ldr(adr) ({u32 rval; \
__asm__ __volatile__(\
"ldr %0,[%1]"\
: "=r" (rval) : "r" (adr)\
);\
rval;\
})
#endif
#define ldrb(adr) ({u8 rval; \
__asm__ __volatile__(\
"ldrb %0,[%1]"\
: "=r" (rval) : "r" (adr)\
);\
rval;\
})
#define strw(adr, val) __asm__ __volatile__(\
"str %0,[%1]\n"\
: : "r" (val), "r" (adr)\
)
#define strb(adr, val) __asm__ __volatile__(\
"strb %0,[%1]\n"\
: : "r" (val), "r" (adr)\
)
/* Count leading zeroes (clz) */
#define clz(arg) ({u8 rval; \
__asm__ __volatile__(\
"clz %0,%1"\
: "=r" (rval) : "r" (arg)\
);\
rval;\
})
#if defined (__aarch64__)
#define mtcpdc(reg,val) __asm__ __volatile__("dc " #reg ",%0" : : "r" (val))
#define mtcpic(reg,val) __asm__ __volatile__("ic " #reg ",%0" : : "r" (val))
#define mtcpicall(reg) __asm__ __volatile__("ic " #reg)
#define mtcptlbi(reg) __asm__ __volatile__("tlbi " #reg)
#define mtcpat(reg,val) __asm__ __volatile__("at " #reg ",%0" : : "r" (val))
/* CP15 operations */
#define mfcp(reg) ({u64 rval = 0U;\
__asm__ __volatile__("mrs %0, " #reg : "=r" (rval));\
rval;\
})
#define mtcp(reg,val) __asm__ __volatile__("msr " #reg ",%0" : : "r" (val))
#else
/* CP15 operations */
#define mtcp(rn, v) __asm__ __volatile__(\
"mcr " rn "\n"\
: : "r" (v)\
);
#define mfcp(rn) ({u32 rval = 0U; \
__asm__ __volatile__(\
"mrc " rn "\n"\
: "=r" (rval)\
);\
rval;\
})
#endif
/************************** Variable Definitions ****************************/
/************************** Function Prototypes *****************************/
#ifdef __cplusplus
}
#endif /* __cplusplus */
/**
*@endcond
*/
#endif /* XPSEUDO_ASM_GCC_H */
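
As a usage note only, the mfcpsr()/mtcpsr() and dsb() macros above are typically combined into a save/mask/restore sequence around a device access. The sketch below is illustrative and not taken from the removed sources; the 0xC0 constant corresponds to the IRQ and FIQ mask bits on both the AArch32 CPSR and the AArch64 DAIF register, but treat it as an assumption here.

#include "xpseudo_asm_gcc.h"

/* Sketch: perform a register write with interrupts masked. */
static void write_reg_masked(volatile u32 *reg, u32 value)
{
    u32 saved = mfcpsr();      /* save the current interrupt mask state */
    mtcpsr(saved | 0xC0U);     /* mask IRQ and FIQ */

    *reg = value;              /* the access being protected */
    dsb();                     /* make sure the write has completed */

    mtcpsr(saved);             /* restore the previous mask state */
}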

File diff suppressed because it is too large

View File

@@ -1,197 +0,0 @@
/******************************************************************************
* Copyright (c) 2010 - 2020 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
/*****************************************************************************/
/**
*
* @file xil_types.h
*
* @addtogroup common_types Basic Data types for Xilinx&reg; Software IP
*
* The xil_types.h file contains basic types for Xilinx software IP. These data types
* are applicable for all processors supported by Xilinx.
* @{
* <pre>
* MODIFICATION HISTORY:
*
* Ver Who Date Changes
* ----- ---- -------- -------------------------------------------------------
* 1.00a hbm 07/14/09 First release
* 3.03a sdm 05/30/11 Added Xuint64 typedef and XUINT64_MSW/XUINT64_LSW macros
* 5.00 pkp 05/29/14 Made changes for 64 bit architecture
* srt 07/14/14 Use standard definitions from stdint.h and stddef.h
* Define LONG and ULONG datatypes and mask values
* 7.00 mus 01/07/19 Add cpp extern macro
* 7.1 aru 08/19/19 Shift the value in UPPER_32_BITS only if it
 *                       is a 64-bit processor
* </pre>
*
******************************************************************************/
#ifndef XIL_TYPES_H /* prevent circular inclusions */
#define XIL_TYPES_H /* by using protection macros */
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stddef.h>
/************************** Constant Definitions *****************************/
#ifndef TRUE
# define TRUE 1U
#endif
#ifndef FALSE
# define FALSE 0U
#endif
#ifndef NULL
#define NULL 0U
#endif
#define XIL_COMPONENT_IS_READY 0x11111111U /**< In device drivers, this macro will be
assigned to the "IsReady" member of a driver
instance to indicate that the driver
instance is initialized and ready to use. */
#define XIL_COMPONENT_IS_STARTED 0x22222222U /**< In device drivers, this macro will be assigned to
the "IsStarted" member of a driver instance
to indicate that the driver instance is
started and can be enabled. */
/* @name New types
* New simple types.
* @{
*/
#ifndef __KERNEL__
#ifndef XBASIC_TYPES_H
/*
* guarded against xbasic_types.h.
*/
typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;
/** @}*/
#define __XUINT64__
typedef struct
{
u32 Upper;
u32 Lower;
} Xuint64;
/*****************************************************************************/
/**
* @brief Return the most significant half of the 64 bit data type.
*
* @param x is the 64 bit word.
*
* @return The upper 32 bits of the 64 bit word.
*
******************************************************************************/
#define XUINT64_MSW(x) ((x).Upper)
/*****************************************************************************/
/**
* @brief Return the least significant half of the 64 bit data type.
*
* @param x is the 64 bit word.
*
* @return The lower 32 bits of the 64 bit word.
*
******************************************************************************/
#define XUINT64_LSW(x) ((x).Lower)
#endif /* XBASIC_TYPES_H */
/*
* xbasic_types.h does not typedef s* or u64
*/
/** @{ */
typedef char char8;
typedef int8_t s8;
typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
typedef uint64_t u64;
typedef int sint32;
typedef intptr_t INTPTR;
typedef uintptr_t UINTPTR;
typedef ptrdiff_t PTRDIFF;
/** @}*/
#if !defined(LONG) || !defined(ULONG)
typedef long LONG;
typedef unsigned long ULONG;
#endif
#define ULONG64_HI_MASK 0xFFFFFFFF00000000U
#define ULONG64_LO_MASK ~ULONG64_HI_MASK
#else
#include <linux/types.h>
#endif
/** @{ */
/**
* This data type defines an interrupt handler for a device.
* The argument points to the instance of the component
*/
typedef void (*XInterruptHandler) (void *InstancePtr);
/**
* This data type defines an exception handler for a processor.
* The argument points to the instance of the component
*/
typedef void (*XExceptionHandler) (void *InstancePtr);
/**
* @brief Returns 32-63 bits of a number.
* @param n : Number being accessed.
* @return Bits 32-63 of number.
*
* @note A basic shift-right of a 64- or 32-bit quantity.
* Use this to suppress the "right shift count >= width of type"
* warning when that quantity is 32-bits.
*/
#if defined (__aarch64__) || defined (__arch64__)
#define UPPER_32_BITS(n) ((u32)(((n) >> 16) >> 16))
#else
#define UPPER_32_BITS(n) 0U
#endif
/**
* @brief Returns 0-31 bits of a number
* @param n : Number being accessed.
* @return Bits 0-31 of number
*/
#define LOWER_32_BITS(n) ((u32)(n))
/************************** Constant Definitions *****************************/
#ifndef TRUE
#define TRUE 1U
#endif
#ifndef FALSE
#define FALSE 0U
#endif
#ifndef NULL
#define NULL 0U
#endif
#ifdef __cplusplus
}
#endif
#endif /* end of protection macro */
/**
* @} End of "addtogroup common_types".
*/
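
The XInterruptHandler typedef above fixes the callback signature Xilinx drivers hand to an interrupt controller: a single void pointer that carries the driver instance. A short sketch of a conforming handler follows; the instance structure and counter are hypothetical and only show how the pointer is threaded through.

#include "xil_types.h"

/* Hypothetical driver instance; real drivers define their own structures. */
typedef struct {
    u32 InterruptCount;
} ExampleInstance;

/* Matches XInterruptHandler: void (*)(void *InstancePtr). */
static void ExampleIrqHandler(void *InstancePtr)
{
    ExampleInstance *Inst = (ExampleInstance *)InstancePtr;
    Inst->InterruptCount++;
}

/* The (handler, &instance) pair would be registered with an interrupt
 * controller driver. */
static ExampleInstance Example0;
static XInterruptHandler ExampleHandler = ExampleIrqHandler;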

View File

@@ -47,16 +47,7 @@ extern "C" {
#include "xil_assert.h"
#include "xil_io.h"
#else
#include <common/xil_types.h>
static inline u32 Xil_In32(UINTPTR Addr)
{
return *(volatile u32 *) Addr;
}
static inline void Xil_Out32(UINTPTR Addr, u32 Value)
{
volatile u32 *LocalAddr = (volatile u32 *)Addr;
*LocalAddr = Value;
}
#include <bsp/xil-compat.h>
#endif /* __rtems__ */
/*
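
The hunk above is the pattern applied throughout this commit: the register accessors previously open-coded under __rtems__ now come from the <bsp/xil-compat.h> shim. The shim itself is not shown in this excerpt; judging from the accessors deleted here, it presumably supplies at least something like the following (a sketch under that assumption, not the actual header contents).

/* Sketch of the minimal accessors a <bsp/xil-compat.h> shim would need to
 * provide for this driver. The typedefs stand in for the type definitions
 * the real shim would pull in from its common header. */
#include <stdint.h>

typedef uintptr_t UINTPTR;
typedef uint32_t u32;

static inline u32 Xil_In32(UINTPTR Addr)
{
    return *(volatile u32 *)Addr;
}

static inline void Xil_Out32(UINTPTR Addr, u32 Value)
{
    *(volatile u32 *)Addr = Value;
}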

View File

@@ -1,8 +1,15 @@
/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSBSPsMicroblaze
*
* @brief This header file provides BSP-specific interfaces.
*/
/*
* COPYRIGHT (c) 2023.
* On-Line Applications Research Corporation (OAR).
* Copyright (C) 2024 On-Line Applications Research Corporation (OAR)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,13 +33,9 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef LIBBSP_SHARED_XIL_SYSTEM_H
#define LIBBSP_SHARED_XIL_SYSTEM_H
#ifndef LIBBSP_MICROBLAZE_FPGA_BSP_XIL_COMPAT_H
#define LIBBSP_MICROBLAZE_FPGA_BSP_XIL_COMPAT_H
/*
* This file defines anything necessary for the Xilinx support infrastructure to
* function properly on a particular platform.
*/
#define ARMA9
#include <bsp/xil-compat-common.h>
#endif

View File

@@ -98,8 +98,10 @@
/***************************** Include Files *********************************/
#include "xnandpsu.h"
#include "xnandpsu_bbm.h"
#ifndef __rtems__
#include "sleep.h"
#include "xil_mem.h"
#endif
/************************** Constant Definitions *****************************/
static const XNandPsu_EccMatrix EccMatrix[] = {

View File

@@ -35,10 +35,14 @@
/***************************** Include Files *********************************/
#include <string.h> /**< For Xil_MemCpy and memset */
#ifndef __rtems__
#include "xil_types.h"
#endif
#include "xnandpsu.h"
#include "xnandpsu_bbm.h"
#ifndef __rtems__
#include "xil_mem.h"
#endif
/************************** Constant Definitions *****************************/

View File

@@ -83,8 +83,10 @@
#include "xqspipsu.h"
#include "xqspipsu_control.h"
#ifndef __rtems__
#include "sleep.h"
#ifdef __rtems__
#else
#include <bsp/xil-compat.h>
#include <rtems/rtems/cache.h>
#endif
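
The last hunk drops the Xilinx "sleep.h" include in favor of the shim and the RTEMS cache manager. How the shim satisfies the driver's delay and cache-maintenance calls is not shown in this diff; one plausible mapping, given only as an assumption, forwards them to POSIX usleep() and the RTEMS cache manager API.

#include <unistd.h>
#include <rtems/rtems/cache.h>

/* Hypothetical forwarding macros a compatibility shim could provide; the
 * actual <bsp/xil-compat.h> contents are not part of this excerpt. */
#define Xil_DCacheFlushRange(addr, len) \
    rtems_cache_flush_multiple_data_lines((const void *)(addr), (size_t)(len))
#define Xil_DCacheInvalidateRange(addr, len) \
    rtems_cache_invalidate_multiple_data_lines((const void *)(addr), (size_t)(len))

/* usleep() itself is available from <unistd.h> on RTEMS, so the driver's
 * delay calls need no further translation. */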

Some files were not shown because too many files have changed in this diff