diff --git a/common/inc/tx_api.h b/common/inc/tx_api.h
index 40ef7ef5..af384662 100644
--- a/common/inc/tx_api.h
+++ b/common/inc/tx_api.h
@@ -26,7 +26,7 @@
/* APPLICATION INTERFACE DEFINITION RELEASE */
/* */
/* tx_api.h PORTABLE C */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -82,6 +82,10 @@
/* add unused parameter macro, */
/* update patch number, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Wenhui Xie Modified comment(s), */
+/* optimized the definition of */
+/* TX_TIMER_TICKS_PER_SECOND, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -118,7 +122,7 @@ extern "C" {
#define AZURE_RTOS_THREADX
#define THREADX_MAJOR_VERSION 6
#define THREADX_MINOR_VERSION 1
-#define THREADX_PATCH_VERSION 10
+#define THREADX_PATCH_VERSION 11
/* Define the following symbol for backward compatibility */
#define EL_PRODUCT_THREADX
@@ -221,7 +225,7 @@ extern "C" {
as a compilation option. */
#ifndef TX_TIMER_TICKS_PER_SECOND
-#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
+#define TX_TIMER_TICKS_PER_SECOND (100UL)
#endif
diff --git a/common/inc/tx_user_sample.h b/common/inc/tx_user_sample.h
index e22babf4..d04d2dcb 100644
--- a/common/inc/tx_user_sample.h
+++ b/common/inc/tx_user_sample.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_user.h PORTABLE C */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -58,6 +58,10 @@
/* user-configurable symbol */
/* TX_TIMER_TICKS_PER_SECOND */
/* resulting in version 6.1.9 */
+/* 04-25-2022 Wenhui Xie Modified comment(s), */
+/* optimized the definition of */
+/* TX_TIMER_TICKS_PER_SECOND, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -116,7 +120,7 @@
Note: the actual hardware timer value may need to be changed (usually in tx_initialize_low_level). */
/*
-#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
+#define TX_TIMER_TICKS_PER_SECOND (100UL)
*/
/* Determine if there is a FileX pointer in the thread control block.
diff --git a/common/src/tx_event_flags_get.c b/common/src/tx_event_flags_get.c
index 1e79a4cd..aba42c6b 100644
--- a/common/src/tx_event_flags_get.c
+++ b/common/src/tx_event_flags_get.c
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_event_flags_get PORTABLE C */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -73,9 +73,12 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
+/* 04-25-2022 Scott Larson Modified comment(s), */
+/* handle 0 flags case, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
UINT _tx_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags,
@@ -276,11 +279,12 @@ UINT interrupted_set_request;
if (wait_option != TX_NO_WAIT)
{
- /* Determine if the preempt disable flag is non-zero. */
- if (_tx_thread_preempt_disable != ((UINT) 0))
+ /* Determine if the preempt disable flag is non-zero OR the requested events value is 0. */
+ if ((_tx_thread_preempt_disable != ((UINT) 0)) || (requested_flags == ((ULONG) 0)))
{
- /* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
+ /* Suspension is not allowed if the preempt disable flag is non-zero at this point,
+ or if requested_flags is 0, return error completion. */
status = TX_NO_EVENTS;
}
else
diff --git a/common/src/tx_event_flags_set.c b/common/src/tx_event_flags_set.c
index 9228f4cd..da0ff553 100644
--- a/common/src/tx_event_flags_set.c
+++ b/common/src/tx_event_flags_set.c
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_event_flags_set PORTABLE C */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -72,9 +72,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
+/* 04-25-2022 William E. Lamie Modified comment(s), and */
+/* added corrected preemption */
+/* check logic, resulting in */
+/* version 6.1.11 */
/* */
/**************************************************************************/
UINT _tx_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set, UINT set_option)
@@ -264,9 +268,6 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Yes, resume the thread and apply any event flag
clearing. */
- /* Set the preempt check flag. */
- preempt_check = TX_TRUE;
-
/* Return the actual event flags that satisfied the request. */
suspend_info_ptr = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
*suspend_info_ptr = current_event_flags;
@@ -336,6 +337,11 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Disable preemption while we process the suspended list. */
_tx_thread_preempt_disable++;
+ /* Since we have temporarily disabled preemption globally, set the preempt
+ check flag to check for any preemption condition - including from
+ unrelated ISR processing. */
+ preempt_check = TX_TRUE;
+
/* Loop to examine all of the suspended threads. */
do
{
@@ -419,9 +425,6 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Yes, this request can be handled now. */
- /* Set the preempt check flag. */
- preempt_check = TX_TRUE;
-
/* Determine if the thread is still suspended on the event flag group. If not, a wait
abort must have been done from an ISR. */
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
diff --git a/common/src/tx_initialize_kernel_enter.c b/common/src/tx_initialize_kernel_enter.c
index c18cec37..12e77dc5 100644
--- a/common/src/tx_initialize_kernel_enter.c
+++ b/common/src/tx_initialize_kernel_enter.c
@@ -30,6 +30,9 @@
#include "tx_thread.h"
#include "tx_timer.h"
+#if defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE)
+extern VOID _tx_execution_initialize(VOID);
+#endif
/* Define any port-specific scheduling data structures. */
@@ -46,7 +49,7 @@ TX_SAFETY_CRITICAL_EXCEPTION_HANDLER
/* FUNCTION RELEASE */
/* */
/* _tx_initialize_kernel_enter PORTABLE C */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -84,9 +87,12 @@ TX_SAFETY_CRITICAL_EXCEPTION_HANDLER
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
+/* 04-25-2022 Scott Larson Modified comment(s), */
+/* added EPK initialization, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_initialize_kernel_enter(VOID)
@@ -138,6 +144,11 @@ VOID _tx_initialize_kernel_enter(VOID)
/* Call any port specific pre-scheduler processing. */
TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION
+#if defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE)
+ /* Initialize Execution Profile Kit. */
+ _tx_execution_initialize();
+#endif
+
/* Enter the scheduling loop to start executing threads! */
_tx_thread_schedule();
diff --git a/common_smp/inc/tx_api.h b/common_smp/inc/tx_api.h
index 9591aee9..e17e0ff3 100644
--- a/common_smp/inc/tx_api.h
+++ b/common_smp/inc/tx_api.h
@@ -26,7 +26,7 @@
/* APPLICATION INTERFACE DEFINITION RELEASE */
/* */
/* tx_api.h PORTABLE SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -71,6 +71,10 @@
/* add unused parameter macro, */
/* update patch number, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Wenhui Xie Modified comment(s), */
+/* optimized the definition of */
+/* TX_TIMER_TICKS_PER_SECOND, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,7 +127,7 @@ extern "C" {
#define AZURE_RTOS_THREADX
#define THREADX_MAJOR_VERSION 6
#define THREADX_MINOR_VERSION 1
-#define THREADX_PATCH_VERSION 10
+#define THREADX_PATCH_VERSION 11
/* Define the following symbol for backward compatibility */
#define EL_PRODUCT_THREADX
@@ -226,7 +230,7 @@ extern "C" {
as a compilation option. */
#ifndef TX_TIMER_TICKS_PER_SECOND
-#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
+#define TX_TIMER_TICKS_PER_SECOND (100UL)
#endif
diff --git a/common_smp/inc/tx_user_sample.h b/common_smp/inc/tx_user_sample.h
index e22babf4..d04d2dcb 100644
--- a/common_smp/inc/tx_user_sample.h
+++ b/common_smp/inc/tx_user_sample.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_user.h PORTABLE C */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -58,6 +58,10 @@
/* user-configurable symbol */
/* TX_TIMER_TICKS_PER_SECOND */
/* resulting in version 6.1.9 */
+/* 04-25-2022 Wenhui Xie Modified comment(s), */
+/* optimized the definition of */
+/* TX_TIMER_TICKS_PER_SECOND, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -116,7 +120,7 @@
Note: the actual hardware timer value may need to be changed (usually in tx_initialize_low_level). */
/*
-#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
+#define TX_TIMER_TICKS_PER_SECOND (100UL)
*/
/* Determine if there is a FileX pointer in the thread control block.
diff --git a/common_smp/src/tx_event_flags_get.c b/common_smp/src/tx_event_flags_get.c
index 1e79a4cd..aba42c6b 100644
--- a/common_smp/src/tx_event_flags_get.c
+++ b/common_smp/src/tx_event_flags_get.c
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_event_flags_get PORTABLE C */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -73,9 +73,12 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
+/* 04-25-2022 Scott Larson Modified comment(s), */
+/* handle 0 flags case, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
UINT _tx_event_flags_get(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG requested_flags,
@@ -276,11 +279,12 @@ UINT interrupted_set_request;
if (wait_option != TX_NO_WAIT)
{
- /* Determine if the preempt disable flag is non-zero. */
- if (_tx_thread_preempt_disable != ((UINT) 0))
+ /* Determine if the preempt disable flag is non-zero OR the requested events value is 0. */
+ if ((_tx_thread_preempt_disable != ((UINT) 0)) || (requested_flags == ((ULONG) 0)))
{
- /* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
+ /* Suspension is not allowed if the preempt disable flag is non-zero at this point,
+ or if requested_flags is 0, return error completion. */
status = TX_NO_EVENTS;
}
else
diff --git a/common_smp/src/tx_event_flags_set.c b/common_smp/src/tx_event_flags_set.c
index 9228f4cd..da0ff553 100644
--- a/common_smp/src/tx_event_flags_set.c
+++ b/common_smp/src/tx_event_flags_set.c
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_event_flags_set PORTABLE C */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -72,9 +72,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 Yuxin Zhou Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 Yuxin Zhou Modified comment(s), */
/* resulting in version 6.1 */
+/* 04-25-2022 William E. Lamie Modified comment(s), and */
+/* added corrected preemption */
+/* check logic, resulting in */
+/* version 6.1.11 */
/* */
/**************************************************************************/
UINT _tx_event_flags_set(TX_EVENT_FLAGS_GROUP *group_ptr, ULONG flags_to_set, UINT set_option)
@@ -264,9 +268,6 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Yes, resume the thread and apply any event flag
clearing. */
- /* Set the preempt check flag. */
- preempt_check = TX_TRUE;
-
/* Return the actual event flags that satisfied the request. */
suspend_info_ptr = TX_VOID_TO_ULONG_POINTER_CONVERT(thread_ptr -> tx_thread_additional_suspend_info);
*suspend_info_ptr = current_event_flags;
@@ -336,6 +337,11 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Disable preemption while we process the suspended list. */
_tx_thread_preempt_disable++;
+ /* Since we have temporarily disabled preemption globally, set the preempt
+ check flag to check for any preemption condition - including from
+ unrelated ISR processing. */
+ preempt_check = TX_TRUE;
+
/* Loop to examine all of the suspended threads. */
do
{
@@ -419,9 +425,6 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Yes, this request can be handled now. */
- /* Set the preempt check flag. */
- preempt_check = TX_TRUE;
-
/* Determine if the thread is still suspended on the event flag group. If not, a wait
abort must have been done from an ISR. */
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
diff --git a/common_smp/src/tx_thread_system_suspend.c b/common_smp/src/tx_thread_system_suspend.c
index 8838d98e..a76a00d4 100644
--- a/common_smp/src/tx_thread_system_suspend.c
+++ b/common_smp/src/tx_thread_system_suspend.c
@@ -38,7 +38,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_system_suspend PORTABLE SMP */
-/* 6.1 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -87,7 +87,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Scott Larson Modified comments and fixed */
+/* loop to find next thread, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_thread_system_suspend(TX_THREAD *thread_ptr)
@@ -667,9 +670,18 @@ UINT processing_complete;
/* Calculate the possible complex path. */
complex_path_possible = possible_cores & available_cores;
+ /* Check if we need to loop to find the next highest priority thread. */
+ if (next_priority == TX_MAX_PRIORITIES)
+ {
+ loop_finished = TX_TRUE;
+ }
+ else
+ {
+ loop_finished = TX_FALSE;
+ }
+
/* Loop to find the next highest priority ready thread that is allowed to run on this core. */
- loop_finished = TX_FALSE;
- do
+ while (loop_finished == TX_FALSE)
{
/* Determine if there is a thread to examine. */
@@ -814,7 +826,7 @@ UINT processing_complete;
}
}
}
- } while (loop_finished == TX_FALSE);
+ }
#ifdef TX_THREAD_SMP_INTER_CORE_INTERRUPT
diff --git a/ports/c667x/ccs/inc/tx_port.h b/ports/c667x/ccs/inc/tx_port.h
index 402304c6..0f56b180 100644
--- a/ports/c667x/ccs/inc/tx_port.h
+++ b/ports/c667x/ccs/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h C667x/TI */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -51,6 +51,10 @@
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Wenhui Xie Modified comment(s), */
+/* optimized the definition of */
+/* TX_TIMER_TICKS_PER_SECOND, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -125,7 +129,7 @@ typedef unsigned short USHORT;
#ifndef TX_TIMER_TICKS_PER_SECOND
-#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
+#define TX_TIMER_TICKS_PER_SECOND (100UL)
#endif
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx.c b/ports/cortex_a12/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+ below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a12/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..e212b36d
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,176 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/.project b/ports/cortex_a12/ac6/example_build/sample_threadx/.project
new file mode 100644
index 00000000..ed4c0885
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/.project
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>sample_threadx</name>
+	<comment></comment>
+	<projects>
+		<project>tx</project>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.genmakebuilder</name>
+			<triggers>clean,full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+		<buildCommand>
+			<name>org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder</name>
+			<triggers>full,incremental,</triggers>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.eclipse.cdt.core.cnature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.managedBuildNature</nature>
+		<nature>org.eclipse.cdt.managedbuilder.core.ScannerConfigNature</nature>
+	</natures>
+</projectDescription>
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+    /* Create the mutex used by threads 6 and 7 without priority inheritance.  */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..1efaa3f2
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..d23881cd
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,44 @@
+;*******************************************************
+; Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;*******************************************************
+
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
+
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
+
+
+SDRAM 0x80000000 0x20000000
+{
+ VECTORS +0
+ {
+ * (VECTORS, +FIRST) ; Vector table and other (assembler) startup code
+ * (InRoot$$Sections) ; All (library) code that must be in a root region
+ }
+
+ RO_CODE +0
+ { * (+RO-CODE) } ; Application RO code (.text)
+
+ RO_DATA +0
+ { * (+RO-DATA) } ; Application RO data (.constdata)
+
+ RW_DATA +0
+ { * (+RW) } ; Application RW data (.data)
+
+ ZI_DATA +0
+ { * (+ZI) } ; Application ZI data (.bss)
+
+ ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
+ { }
+
+ ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
+ { }
+
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
+
+ TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
+ { }
+}
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a12/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..670fadb9
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,397 @@
+//----------------------------------------------------------------
+// ARMv7-A Embedded example - Startup Code
+//
+// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//----------------------------------------------------------------
+
+// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
+
+#define Mode_USR 0x10
+#define Mode_FIQ 0x11
+#define Mode_IRQ 0x12
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define Mode_SYS 0x1F
+
+#define I_Bit 0x80 // When I bit is set, IRQ is disabled
+#define F_Bit 0x40 // When F bit is set, FIQ is disabled
+
+
+ .section VECTORS, "ax"
+ .align 3
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+//----------------------------------------------------------------
+// Entry point for the Reset handler
+//----------------------------------------------------------------
+
+ .global Vectors
+
+//----------------------------------------------------------------
+// Exception Vector Table
+//----------------------------------------------------------------
+// Note: LDR PC instructions are used here, though branch (B) instructions
+// could also be used, unless the exception handlers are >32MB away.
+
+Vectors:
+ LDR PC, Reset_Addr
+ LDR PC, Undefined_Addr
+ LDR PC, SVC_Addr
+ LDR PC, Prefetch_Addr
+ LDR PC, Abort_Addr
+ LDR PC, Hypervisor_Addr
+ LDR PC, IRQ_Addr
+ LDR PC, FIQ_Addr
+
+
+ .balign 4
+Reset_Addr:
+ .word Reset_Handler
+Undefined_Addr:
+ .word __tx_undefined
+SVC_Addr:
+ .word __tx_swi_interrupt
+Prefetch_Addr:
+ .word __tx_prefetch_handler
+Abort_Addr:
+ .word __tx_abort_handler
+Hypervisor_Addr:
+ .word __tx_reserved_handler
+IRQ_Addr:
+ .word __tx_irq_handler
+FIQ_Addr:
+ .word __tx_fiq_handler
+
+
+//----------------------------------------------------------------
+// Exception Handlers
+//----------------------------------------------------------------
+
+Undefined_Handler:
+ B Undefined_Handler
+SVC_Handler:
+ B SVC_Handler
+Prefetch_Handler:
+ B Prefetch_Handler
+Abort_Handler:
+ B Abort_Handler
+Hypervisor_Handler:
+ B Hypervisor_Handler
+IRQ_Handler:
+ B IRQ_Handler
+FIQ_Handler:
+ B FIQ_Handler
+
+
+//----------------------------------------------------------------
+// Reset Handler
+//----------------------------------------------------------------
+Reset_Handler:
+
+//----------------------------------------------------------------
+// Disable caches and MMU in case they were left enabled from an earlier run
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
+// after the MMU has been enabled and scatterloading has been performed.
+
+//----------------------------------------------------------------
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
+ ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
+ MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
+ ISB
+
+//----------------------------------------------------------------
+// Invalidate Data and Instruction TLBs and branch predictor
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MOV r0,#0
+ MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
+ MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+
+//----------------------------------------------------------------
+// Initialize Supervisor Mode Stack
+// Note stack must be 8 byte aligned.
+//----------------------------------------------------------------
+
+ LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
+
+//----------------------------------------------------------------
+// Disable loop-buffer to fix errata on A15 r0p0
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+ MOV r1, r0, lsr #4 // Extract Primary Part Number
+ LDR r2, =0xFFF
+ AND r1, r1, r2
+ LDR r2, =0xC0F
+ CMP r1, r2 // Is this an A15?
+ BNE notA15r0p0 // Jump if not A15
+ AND r5, r0, #0x00f00000 // Variant
+ AND r6, r0, #0x0000000f // Revision
+ ORRS r6, r6, r5 // Combine variant and revision
+ BNE notA15r0p0 // Jump if not r0p0
+ MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+ ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+ MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+ ISB
+notA15r0p0:
+
+//----------------------------------------------------------------
+// Set Vector Base Address Register (VBAR) to point to this application's vector table
+//----------------------------------------------------------------
+
+ LDR r0, =Vectors
+ MCR p15, 0, r0, c12, c0, 0
+
+//----------------------------------------------------------------
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
+// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
+//----------------------------------------------------------------
+
+ // Invalidate L1 Instruction Cache
+
+ MRC p15, 1, r0, c0, c0, 1 // Read Cache Level ID Register (CLIDR)
+ TST r0, #0x3 // Harvard Cache?
+ MOV r0, #0 // SBZ
+ MCRNE p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate instruction cache and flush branch target cache
+
+ // Invalidate Data/Unified Caches
+
+ MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
+ ANDS r3, r0, #0x07000000 // Extract coherency level
+ MOV r3, r3, LSR #23 // Total cache levels << 1
+ BEQ Finished // If 0, no need to clean
+
+ MOV r10, #0 // R10 holds current cache level << 1
+Loop1:
+ ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
+ MOV r1, r0, LSR r2 // Bottom 3 bits are the Cache-type for this level
+ AND r1, r1, #7 // Isolate those lower 3 bits
+ CMP r1, #2
+ BLT Skip // No cache or only instruction cache at this level
+
+ MCR p15, 2, r10, c0, c0, 0 // Write the Cache Size selection register
+ ISB // ISB to sync the change to the CacheSizeID reg
+ MRC p15, 1, r1, c0, c0, 0 // Reads current Cache Size ID register
+ AND r2, r1, #7 // Extract the line length field
+ ADD r2, r2, #4 // Add 4 for the line length offset (log2 16 bytes)
+ LDR r4, =0x3FF
+ ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
+ CLZ r5, r4 // R5 is the bit position of the way size increment
+ LDR r7, =0x7FFF
+ ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
+
+Loop2:
+ MOV r9, r4 // R9 working copy of the max way size (right aligned)
+
+Loop3:
+ ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
+ ORR r11, r11, r7, LSL r2 // Factor in the Set number
+ MCR p15, 0, r11, c7, c6, 2 // Invalidate by Set/Way
+ SUBS r9, r9, #1 // Decrement the Way number
+ BGE Loop3
+ SUBS r7, r7, #1 // Decrement the Set number
+ BGE Loop2
+Skip:
+ ADD r10, r10, #2 // Increment the cache number
+ CMP r3, r10
+ BGT Loop1
+
+Finished:
+
+
+//----------------------------------------------------------------
+// MMU Configuration
+// Set translation table base
+//----------------------------------------------------------------
+
+ // Two translation tables are supported, TTBR0 and TTBR1
+ // Configure translation table base (TTB) control register cp15,c2
+ // to a value of all zeros, indicates we are using TTB register 0.
+
+ MOV r0,#0x0
+ MCR p15, 0, r0, c2, c0, 2
+
+ // write the address of our page table base to TTB register 0
+ LDR r0,=Image$$TTB$$ZI$$Base
+
+ MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
+ // S=0 (translation table walk to non-shared memory)
+ ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
+
+ ORR r0,r0,r1
+
+ MCR p15, 0, r0, c2, c0, 0
+
+
+//----------------------------------------------------------------
+// PAGE TABLE generation
+
+// Generate the page tables
+// Build a flat translation table for the whole address space.
+// ie: Create 4096 1MB sections from 0x000xxxxx to 0xFFFxxxxx
+
+
+// 31 20 19 18 17 16 15 14 12 11 10 9 8 5 4 3 2 1 0
+// |section base address| 0 0 |nG| S |AP2| TEX | AP | P | Domain | XN | C B | 1 0|
+//
+// Bits[31:20] - Top 12 bits of VA is pointer into table
+// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
+// S[16]=0 - Indicates normal memory is shared when set.
+// AP2[15]=0
+// AP[11:10]=11 - Configure for full read/write access in all modes
+// TEX[14:12]=000
+// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
+// (except for the code segment descriptor, see below)
+// IMPP[9]=0 - Ignored
+// Domain[8:5]=1111  - Set all pages to use domain 15
+// XN[4]=1 - Execute never on Strongly-ordered memory
+// Bits[1:0]=10 - Indicate entry is a 1MB section
+//----------------------------------------------------------------
+ LDR r0,=Image$$TTB$$ZI$$Base
+ LDR r1,=0xfff // loop counter
+ LDR r2,=0b00000000000000000000110111100010
+
+ // r0 contains the address of the translation table base
+ // r1 is loop counter
+ // r2 is level1 descriptor (bits 19:0)
+
+ // use loop counter to create 4096 individual table entries.
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
+
+init_ttb_1:
+ ORR r3, r2, r1, LSL#20 // R3 now contains full level1 descriptor to write
+ ORR r3, r3, #0b0000000010000 // Set XN bit
+ STR r3, [r0, r1, LSL#2] // Str table entry at TTB base + loopcount*4
+ SUBS r1, r1, #1 // Decrement loop counter
+ BPL init_ttb_1
+
+ // In this example, the 1MB section based at '__code_start' is setup specially as cacheable (write back mode).
+ // TEX[14:12]=001 and CB[3:2]= 11, Outer and inner write back, write allocate normal memory.
+ LDR r1,=Image$$VECTORS$$Base // Base physical address of code segment
+ LSR r1, #20 // Shift right to align to 1MB boundaries
+ ORR r3, r2, r1, LSL#20 // Setup the initial level1 descriptor again
+ ORR r3, r3, #0b0000000001100 // Set CB bits
+ ORR r3, r3, #0b1000000000000 // Set TEX bit 12
+ STR r3, [r0, r1, LSL#2] // str table entry
+
+//----------------------------------------------------------------
+// Setup domain control register - Enable all domains to client mode
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c3, c0, 0 // Read Domain Access Control Register
+ LDR r0, =0x55555555 // Initialize every domain entry to b01 (client)
+ MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
+
+#if defined(__ARM_NEON) || defined(__ARM_FP)
+//----------------------------------------------------------------
+// Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
+// Enables Full Access i.e. in both privileged and non privileged modes
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 2 // Read Coprocessor Access Control Register (CPACR)
+ ORR r0, r0, #(0xF << 20) // Enable access to CP 10 & 11
+ MCR p15, 0, r0, c1, c0, 2 // Write Coprocessor Access Control Register (CPACR)
+ ISB
+
+//----------------------------------------------------------------
+// Switch on the VFP and NEON hardware
+//----------------------------------------------------------------
+
+ MOV r0, #0x40000000
+ VMSR FPEXC, r0 // Write FPEXC register, EN bit set
+#endif
+
+
+//----------------------------------------------------------------
+// Enable MMU and branch to __main
+// Leaving the caches disabled until after scatter loading.
+//----------------------------------------------------------------
+
+ LDR r12,=__main
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ BIC r0, r0, #0x2 // Clear A bit 1 to disable strict alignment fault checking
+ ORR r0, r0, #0x1 // Set M bit 0 to enable MMU before scatter loading
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+// Now the MMU is enabled, virtual to physical address translations will occur. This will affect the next
+// instruction fetch.
+//
+// The two instructions currently in the pipeline will have been fetched before the MMU was enabled.
+// The branch to __main is safe because the Virtual Address (VA) is the same as the Physical Address (PA)
+// (flat mapping) of this code that enables the MMU and performs the branch
+
+ BX r12 // Branch to __main C library entry point
+
+
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+// This code must be run from a privileged mode
+//----------------------------------------------------------------
+
+ .section ENABLECACHES,"ax"
+ .align 3
+
+ .global enable_caches
+ .type enable_caches, "function"
+ .cfi_startproc
+enable_caches:
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ ORR r0, r0, #(0x1 << 12) // Set I bit 12 to enable I Cache
+ ORR r0, r0, #(0x1 << 2) // Set C bit 2 to enable D Cache
+ ORR r0, r0, #(0x1 << 11) // Set Z bit 11 to enable branch prediction
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+ ORR r0, #2 // L2EN bit, enable L2 cache
+ ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
+ MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
+ ISB
+
+ BX lr
+ .cfi_endproc
+
+ .global disable_caches
+ .type disable_caches, "function"
+disable_caches:
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ BX lr
+
+
diff --git a/ports/cortex_a12/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a12/ac6/example_build/sample_threadx/tx_initialize_low_level.S
new file mode 100644
index 00000000..715958f0
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -0,0 +1,299 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
+ .global _tx_thread_system_stack_ptr
+ .global _tx_initialize_unused_memory
+ .global _tx_thread_context_save
+ .global _tx_thread_context_restore
+ .global _tx_timer_interrupt
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+    applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_initialize_low_level
+ .type $_tx_initialize_low_level,function
+$_tx_initialize_low_level:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level,function
+_tx_initialize_low_level:
+
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+#endif
+
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+/* Define shells for each of the interrupt vectors. */
+
+ .global __tx_undefined
+__tx_undefined:
+ B __tx_undefined // Undefined handler
+
+ .global __tx_swi_interrupt
+__tx_swi_interrupt:
+ B __tx_swi_interrupt // Software interrupt handler
+
+ .global __tx_prefetch_handler
+__tx_prefetch_handler:
+ B __tx_prefetch_handler // Prefetch exception handler
+
+ .global __tx_abort_handler
+__tx_abort_handler:
+ B __tx_abort_handler // Abort exception handler
+
+ .global __tx_reserved_handler
+__tx_reserved_handler:
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
+ .type __tx_irq_processing_return,function
+ .global __tx_irq_handler
+__tx_irq_handler:
+
+ /* Jump to context save to save system context. */
+ B _tx_thread_context_save
+__tx_irq_processing_return:
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_start
+#endif
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled.  */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_end
+#endif
+
+ /* Jump to context restore to restore system context. */
+ B _tx_thread_context_restore
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled.  */
+
+ /* Jump to context restore to restore system context. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ .global __tx_fiq_handler
+ .global __tx_fiq_processing_return
+__tx_fiq_handler:
+
+ /* Jump to fiq context save to save system context. */
+ B _tx_thread_fiq_context_save
+__tx_fiq_processing_return:
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_start
+#endif
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_end
+#endif
+
+ /* Jump to fiq context restore to restore system context. */
+ B _tx_thread_fiq_context_restore
+
+
+#else
+ .global __tx_fiq_handler
+__tx_fiq_handler:
+ B __tx_fiq_handler // FIQ interrupt handler
+#endif
+
+
+BUILD_OPTIONS:
+ .word _tx_build_options // Reference to bring in
+VERSION_ID:
+ .word _tx_version_id // Reference to bring in
+
+
diff --git a/ports/cortex_a12/ac6/example_build/tx/.cproject b/ports/cortex_a12/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..c6b251b2
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/tx/.cproject
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a12/ac6/example_build/tx/.project b/ports/cortex_a12/ac6/example_build/tx/.project
new file mode 100644
index 00000000..863ca5cb
--- /dev/null
+++ b/ports/cortex_a12/ac6/example_build/tx/.project
@@ -0,0 +1,48 @@
+
+
+ tx
+
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+ full,incremental,
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
+
+ inc_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/inc
+
+
+ inc_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/inc
+
+
+ src_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/src
+
+
+ src_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/src
+
+
+
diff --git a/ports/cortex_a12/ac6/inc/tx_port.h b/ports/cortex_a12/ac6/inc/tx_port.h
new file mode 100644
index 00000000..19463de1
--- /dev/null
+++ b/ports/cortex_a12/ac6/inc/tx_port.h
@@ -0,0 +1,328 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned long ULONG;
+typedef short SHORT;
+typedef unsigned short USHORT;
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#else
+#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
+#endif
+#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#define TX_INLINE_INITIALIZATION
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#if __TARGET_ARCH_ARM > 4
+
+#ifndef __thumb__
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
+ asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
+ b = 31 - b;
+#endif
+#endif
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifdef __thumb__
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+
+#else
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save, tx_temp;
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID if ": "=r" (interrupt_save) );
+#else
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID i ": "=r" (interrupt_save) );
+#endif
+
+#define TX_RESTORE asm volatile (" MSR CPSR_c,%0 "::"r" (interrupt_save) );
+
+#endif
+
+
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
+ thread. */
+
+void tx_thread_vfp_enable(void);
+void tx_thread_vfp_disable(void);
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
+
diff --git a/ports/cortex_a12/ac6/src/tx_thread_context_restore.S b/ports/cortex_a12/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..fae7e72d
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,222 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
+#else
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
+#endif
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore,function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
+_tx_skip_irq_vfp_save:
+
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a12/ac6/src/tx_thread_context_save.S b/ports/cortex_a12/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..7ac48c2e
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,172 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save,function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a12/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a12/ac6/src/tx_thread_fiq_context_restore.S
new file mode 100644
index 00000000..006be973
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_fiq_context_restore.S
@@ -0,0 +1,223 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_system_stack_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_restore
+ .type _tx_thread_fiq_context_restore,function
+_tx_thread_fiq_context_restore:
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
+
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_fiq_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+    LDMIA   sp!, {r0, lr}                   // Recover SPSR and point of interrupt
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+_tx_skip_fiq_vfp_save:
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
+
diff --git a/ports/cortex_a12/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a12/ac6/src/tx_thread_fiq_context_save.S
new file mode 100644
index 00000000..7db6a4c2
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_fiq_context_save.S
@@ -0,0 +1,178 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_fiq_processing_return
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_save
+ .type _tx_thread_fiq_context_save,function
+_tx_thread_fiq_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+       out, we are in FIQ mode, and all registers are intact.  */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
+__tx_thread_fiq_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+
+__tx_thread_fiq_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_end.S
new file mode 100644
index 00000000..b34d881e
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_end.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_end
+ .type _tx_thread_fiq_nesting_end,function
+_tx_thread_fiq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+    ORR     r0, r0, #FIQ_MODE_BITS          // Build FIQ mode CPSR
+    MSR     CPSR_c, r0                      // Reenter FIQ mode
+
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_start.S
new file mode 100644
index 00000000..c9cd5a06
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_fiq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_start
+ .type _tx_thread_fiq_nesting_start,function
+_tx_thread_fiq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+    MSR     CPSR_c, r0                      // Enable FIQ interrupts (already in system mode)
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a12/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..63b1609a
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+INT_MASK = 0x03F
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_control
+$_tx_thread_interrupt_control:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control,function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a12/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..13258808
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,101 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_disable
+$_tx_thread_interrupt_disable:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable,function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ
+#else
+ CPSID i // Disable IRQ
+#endif
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a12/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..2d582511
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,93 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_restore
+$_tx_thread_interrupt_restore:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore,function
+_tx_thread_interrupt_restore:
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_end.S
new file mode 100644
index 00000000..ec7e63c6
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_end.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_end
+ .type _tx_thread_irq_nesting_end,function
+_tx_thread_irq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_start.S
new file mode 100644
index 00000000..c69976ed
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_irq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_start
+ .type _tx_thread_irq_nesting_start,function
+_tx_thread_irq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enable IRQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_schedule.S b/ports/cortex_a12/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..8330e9df
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,230 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_execute_ptr
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_schedule
+ .type $_tx_thread_schedule,function
+$_tx_thread_schedule:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule,function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSIE if // Enable IRQ and FIQ interrupts
+#else
+ CPSIE i // Enable IRQ interrupts
+#endif
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+__tx_thread_schedule_loop:
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
+#endif
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
+ BEQ _tx_solicited_return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_interrupt_vfp_restore:
+#endif
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
+
+_tx_solicited_return:
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_solicited_vfp_restore:
+#endif
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+
+ .global tx_thread_vfp_enable
+ .type tx_thread_vfp_enable,function
+tx_thread_vfp_enable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_enable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+ .global tx_thread_vfp_disable
+ .type tx_thread_vfp_disable,function
+tx_thread_vfp_disable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_disable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_stack_build.S b/ports/cortex_a12/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..f413e673
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,164 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+ .arm
+
+SVC_MODE = 0x13 // SVC mode
+#ifdef TX_ENABLE_FIQ_SUPPORT
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
+#else
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
+#endif
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_thread_stack_build
+ .type $_tx_thread_stack_build,function
+$_tx_thread_stack_build:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build,function
+_tx_thread_stack_build:
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/ac6/src/tx_thread_system_return.S b/ports/cortex_a12/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..cb7d62ce
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,162 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_system_return
+ .type $_tx_thread_system_return,function
+$_tx_thread_system_return:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return,function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
+_tx_skip_solicited_vfp_save:
+#endif
+
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a12/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a12/ac6/src/tx_thread_vectored_context_save.S
new file mode 100644
index 00000000..d846223f
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_thread_vectored_context_save.S
@@ -0,0 +1,165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_vectored_context_save
+ .type _tx_thread_vectored_context_save,function
+_tx_thread_vectored_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a12/ac6/src/tx_timer_interrupt.S b/ports/cortex_a12/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..7337ed0c
--- /dev/null
+++ b/ports/cortex_a12/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,231 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+/* Define Assembly language external references... */
+
+ .global _tx_timer_time_slice
+ .global _tx_timer_system_clock
+ .global _tx_timer_current_ptr
+ .global _tx_timer_list_start
+ .global _tx_timer_list_end
+ .global _tx_timer_expired_time_slice
+ .global _tx_timer_expired
+ .global _tx_thread_time_slice
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_timer_interrupt
+ .type $_tx_timer_interrupt,function
+$_tx_timer_interrupt:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt,function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/example_build/build_threadx.bat b/ports/cortex_a12/gnu/example_build/build_threadx.bat
new file mode 100644
index 00000000..d3898b30
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/build_threadx.bat
@@ -0,0 +1,238 @@
+REM Build script for the ThreadX library (tx.a), Cortex-A12 port, GNU toolchain.
+REM Remove any previous library so the archive below is rebuilt from scratch.
+del tx.a
+REM Assemble the board-specific low-level initialization and the port
+REM assembly sources (scheduling, context save/restore, interrupt and
+REM timer support).
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 tx_initialize_low_level.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_stack_build.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_schedule.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_system_return.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_context_save.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_context_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_interrupt_control.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_timer_interrupt.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_interrupt_disable.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_interrupt_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_fiq_context_save.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_fiq_nesting_start.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_irq_nesting_start.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_irq_nesting_end.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_fiq_nesting_end.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_fiq_context_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 ../src/tx_thread_vectored_context_save.S
+REM Compile the portable ThreadX C sources (tx_*: core kernel services).
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_search.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_set_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_high_level.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_kernel_enter.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_kernel_setup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_flush.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_front_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_receive.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_send_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_ceiling_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_put_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_entry_exit_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_identify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_preemption_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_relinquish.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_reset.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_shell_entry.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_sleep.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_analyze.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_error_handler.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_error_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_preempt_check.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_terminate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_time_slice.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_time_slice_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_timeout.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_wait_abort.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_time_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_time_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_expiration_process.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_system_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_system_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_thread_entry.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_enable.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_disable.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_interrupt_control.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_isr_enter_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_isr_exit_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_object_register.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_object_unregister.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_user_event_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_buffer_full_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_event_filter.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_event_unfilter.c
+REM Compile the error-checking shell functions (txe_*: parameter-validation
+REM wrappers around the corresponding tx_* services).
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_set_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_flush.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_front_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_receive.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_send_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_ceiling_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_put_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_entry_exit_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_preemption_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_relinquish.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_reset.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_terminate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_time_slice_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_wait_abort.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_info_get.c
+REM Archive all object files into the ThreadX library tx.a (ar -r inserts
+REM members, replacing any existing member of the same name).
+arm-none-eabi-ar -r tx.a tx_thread_stack_build.o tx_thread_schedule.o tx_thread_system_return.o tx_thread_context_save.o tx_thread_context_restore.o tx_timer_interrupt.o tx_thread_interrupt_control.o
+arm-none-eabi-ar -r tx.a tx_thread_interrupt_disable.o tx_thread_interrupt_restore.o tx_thread_fiq_context_save.o tx_thread_fiq_nesting_start.o tx_thread_irq_nesting_start.o tx_thread_irq_nesting_end.o
+arm-none-eabi-ar -r tx.a tx_thread_fiq_nesting_end.o tx_thread_fiq_context_restore.o tx_thread_vectored_context_save.o tx_initialize_low_level.o
+arm-none-eabi-ar -r tx.a tx_block_allocate.o tx_block_pool_cleanup.o tx_block_pool_create.o tx_block_pool_delete.o tx_block_pool_info_get.o
+arm-none-eabi-ar -r tx.a tx_block_pool_initialize.o tx_block_pool_performance_info_get.o tx_block_pool_performance_system_info_get.o tx_block_pool_prioritize.o
+arm-none-eabi-ar -r tx.a tx_block_release.o tx_byte_allocate.o tx_byte_pool_cleanup.o tx_byte_pool_create.o tx_byte_pool_delete.o tx_byte_pool_info_get.o
+arm-none-eabi-ar -r tx.a tx_byte_pool_initialize.o tx_byte_pool_performance_info_get.o tx_byte_pool_performance_system_info_get.o tx_byte_pool_prioritize.o
+arm-none-eabi-ar -r tx.a tx_byte_pool_search.o tx_byte_release.o tx_event_flags_cleanup.o tx_event_flags_create.o tx_event_flags_delete.o tx_event_flags_get.o
+arm-none-eabi-ar -r tx.a tx_event_flags_info_get.o tx_event_flags_initialize.o tx_event_flags_performance_info_get.o tx_event_flags_performance_system_info_get.o
+arm-none-eabi-ar -r tx.a tx_event_flags_set.o tx_event_flags_set_notify.o tx_initialize_high_level.o tx_initialize_kernel_enter.o tx_initialize_kernel_setup.o
+arm-none-eabi-ar -r tx.a tx_mutex_cleanup.o tx_mutex_create.o tx_mutex_delete.o tx_mutex_get.o tx_mutex_info_get.o tx_mutex_initialize.o tx_mutex_performance_info_get.o
+arm-none-eabi-ar -r tx.a tx_mutex_performance_system_info_get.o tx_mutex_prioritize.o tx_mutex_priority_change.o tx_mutex_put.o tx_queue_cleanup.o tx_queue_create.o
+arm-none-eabi-ar -r tx.a tx_queue_delete.o tx_queue_flush.o tx_queue_front_send.o tx_queue_info_get.o tx_queue_initialize.o tx_queue_performance_info_get.o
+arm-none-eabi-ar -r tx.a tx_queue_performance_system_info_get.o tx_queue_prioritize.o tx_queue_receive.o tx_queue_send.o tx_queue_send_notify.o tx_semaphore_ceiling_put.o
+arm-none-eabi-ar -r tx.a tx_semaphore_cleanup.o tx_semaphore_create.o tx_semaphore_delete.o tx_semaphore_get.o tx_semaphore_info_get.o tx_semaphore_initialize.o
+arm-none-eabi-ar -r tx.a tx_semaphore_performance_info_get.o tx_semaphore_performance_system_info_get.o tx_semaphore_prioritize.o tx_semaphore_put.o tx_semaphore_put_notify.o
+arm-none-eabi-ar -r tx.a tx_thread_create.o tx_thread_delete.o tx_thread_entry_exit_notify.o tx_thread_identify.o tx_thread_info_get.o tx_thread_initialize.o
+arm-none-eabi-ar -r tx.a tx_thread_performance_info_get.o tx_thread_performance_system_info_get.o tx_thread_preemption_change.o tx_thread_priority_change.o tx_thread_relinquish.o
+arm-none-eabi-ar -r tx.a tx_thread_reset.o tx_thread_resume.o tx_thread_shell_entry.o tx_thread_sleep.o tx_thread_stack_analyze.o tx_thread_stack_error_handler.o
+arm-none-eabi-ar -r tx.a tx_thread_stack_error_notify.o tx_thread_suspend.o tx_thread_system_preempt_check.o tx_thread_system_resume.o tx_thread_system_suspend.o
+arm-none-eabi-ar -r tx.a tx_thread_terminate.o tx_thread_time_slice.o tx_thread_time_slice_change.o tx_thread_timeout.o tx_thread_wait_abort.o tx_time_get.o
+arm-none-eabi-ar -r tx.a tx_time_set.o tx_timer_activate.o tx_timer_change.o tx_timer_create.o tx_timer_deactivate.o tx_timer_delete.o tx_timer_expiration_process.o
+arm-none-eabi-ar -r tx.a tx_timer_info_get.o tx_timer_initialize.o tx_timer_performance_info_get.o tx_timer_performance_system_info_get.o tx_timer_system_activate.o
+arm-none-eabi-ar -r tx.a tx_timer_system_deactivate.o tx_timer_thread_entry.o tx_trace_enable.o tx_trace_disable.o tx_trace_initialize.o tx_trace_interrupt_control.o
+arm-none-eabi-ar -r tx.a tx_trace_isr_enter_insert.o tx_trace_isr_exit_insert.o tx_trace_object_register.o tx_trace_object_unregister.o tx_trace_user_event_insert.o
+arm-none-eabi-ar -r tx.a tx_trace_buffer_full_notify.o tx_trace_event_filter.o tx_trace_event_unfilter.o
+arm-none-eabi-ar -r tx.a txe_block_allocate.o txe_block_pool_create.o txe_block_pool_delete.o txe_block_pool_info_get.o txe_block_pool_prioritize.o txe_block_release.o
+arm-none-eabi-ar -r tx.a txe_byte_allocate.o txe_byte_pool_create.o txe_byte_pool_delete.o txe_byte_pool_info_get.o txe_byte_pool_prioritize.o txe_byte_release.o
+arm-none-eabi-ar -r tx.a txe_event_flags_create.o txe_event_flags_delete.o txe_event_flags_get.o txe_event_flags_info_get.o txe_event_flags_set.o
+arm-none-eabi-ar -r tx.a txe_event_flags_set_notify.o txe_mutex_create.o txe_mutex_delete.o txe_mutex_get.o txe_mutex_info_get.o txe_mutex_prioritize.o
+arm-none-eabi-ar -r tx.a txe_mutex_put.o txe_queue_create.o txe_queue_delete.o txe_queue_flush.o txe_queue_front_send.o txe_queue_info_get.o txe_queue_prioritize.o
+arm-none-eabi-ar -r tx.a txe_queue_receive.o txe_queue_send.o txe_queue_send_notify.o txe_semaphore_ceiling_put.o txe_semaphore_create.o txe_semaphore_delete.o
+arm-none-eabi-ar -r tx.a txe_semaphore_get.o txe_semaphore_info_get.o txe_semaphore_prioritize.o txe_semaphore_put.o txe_semaphore_put_notify.o txe_thread_create.o
+arm-none-eabi-ar -r tx.a txe_thread_delete.o txe_thread_entry_exit_notify.o txe_thread_info_get.o txe_thread_preemption_change.o txe_thread_priority_change.o
+arm-none-eabi-ar -r tx.a txe_thread_relinquish.o txe_thread_reset.o txe_thread_resume.o txe_thread_suspend.o txe_thread_terminate.o txe_thread_time_slice_change.o
+arm-none-eabi-ar -r tx.a txe_thread_wait_abort.o txe_timer_activate.o txe_timer_change.o txe_timer_create.o txe_timer_deactivate.o txe_timer_delete.o txe_timer_info_get.o
diff --git a/ports/cortex_a12/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a12/gnu/example_build/build_threadx_sample.bat
new file mode 100644
index 00000000..d6716da1
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/build_threadx_sample.bat
@@ -0,0 +1,6 @@
+REM Build the ThreadX sample application, Cortex-A12 port, GNU toolchain.
+REM Assemble the startup/reset code and the low-level initialization.
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 reset.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 crt0.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 tx_initialize_low_level.S
+REM Compile the sample application against the ThreadX public headers.
+arm-none-eabi-gcc -c -g -mcpu=cortex-a12 -I../../../../common/inc -I../inc sample_threadx.c
+REM Link with the port linker script and the ThreadX library built by build_threadx.bat.
+arm-none-eabi-gcc -g -mcpu=cortex-a12 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
+
diff --git a/ports/cortex_a12/gnu/example_build/crt0.S b/ports/cortex_a12/gnu/example_build/crt0.S
new file mode 100644
index 00000000..56b6c958
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/crt0.S
@@ -0,0 +1,90 @@
+
+/* .text is used instead of .section .text so it works with arm-aout too. */
+ .text
+ .code 32
+ .align 0
+
+ .global _mainCRTStartup
+ .global _start
+ .global start
+start:
+_start:
+_mainCRTStartup:
+
+/* Start by setting up a stack */
+ /* Set up the stack pointer to a fixed value */
+ ldr r3, .LC0
+ mov sp, r3
+ /* Setup a default stack-limit in case the code has been
+ compiled with "-mapcs-stack-check". Hard-wiring this value
+ is not ideal, since there is currently no support for
+ checking that the heap and stack have not collided, or that
+ this default 64k is enough for the program being executed.
+ However, it ensures that this simple crt0 world will not
+ immediately cause an overflow event: */
+ sub sl, sp, #64 << 10 /* Still assumes 256 bytes below sl */
+ mov a2, #0 /* Second arg: fill value */
+ mov fp, a2 /* Null frame pointer */
+ mov r7, a2 /* Null frame pointer for Thumb */
+
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
+ bl memset
+ mov r0, #0 /* no arguments */
+ mov r1, #0 /* no argv either */
+#ifdef __USES_INITFINI__
+ /* Some arm/elf targets use the .init and .fini sections
+ to create constructors and destructors, and for these
+ targets we need to call the _init function and arrange
+ for _fini to be called at program exit. */
+ mov r4, r0
+ mov r5, r1
+/* ldr r0, .Lfini */
+ bl atexit
+/* bl init */
+ mov r0, r4
+ mov r1, r5
+#endif
+ bl main
+
+ bl exit /* Should not return. */
+
+
+ /* For Thumb, constants must be after the code since only
+ positive offsets are supported for PC relative addresses. */
+
+ .align 0
+.LC0:
+.LC1:
+ .word __bss_start__
+.LC2:
+ .word __bss_end__
+/*
+#ifdef __USES_INITFINI__
+.Lfini:
+ .word _fini
+#endif */
+ /* Return ... */
+#ifdef __APCS_26__
+ movs pc, lr
+#else
+#ifdef __THUMB_INTERWORK
+ bx lr
+#else
+ mov pc, lr
+#endif
+#endif
+
+
+/* Workspace for Angel calls. */
+ .data
+/* Data returned by monitor SWI. */
+.global __stack_base__
+HeapBase: .word 0
+HeapLimit: .word 0
+__stack_base__: .word 0
+StackLimit: .word 0
diff --git a/ports/cortex_a12/gnu/example_build/reset.S b/ports/cortex_a12/gnu/example_build/reset.S
new file mode 100644
index 00000000..597e9d9a
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/reset.S
@@ -0,0 +1,64 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+ .global _start
+ .global __tx_undefined
+ .global __tx_swi_interrupt
+ .global __tx_prefetch_handler
+ .global __tx_abort_handler
+ .global __tx_reserved_handler
+ .global __tx_irq_handler
+ .global __tx_fiq_handler
+
+/* Define the vector area. This should be located or copied to 0. */
+
+ .text
+ .global __vectors
+__vectors:
+
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
+
+STARTUP:
+ .word _start // Reset goes to C startup function
+UNDEFINED:
+ .word __tx_undefined // Undefined handler
+SWI:
+ .word __tx_swi_interrupt // Software interrupt handler
+PREFETCH:
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
+FIQ:
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a12/gnu/example_build/sample_threadx.c b/ports/cortex_a12/gnu/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+ below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a12/gnu/example_build/sample_threadx.ld b/ports/cortex_a12/gnu/example_build/sample_threadx.ld
new file mode 100644
index 00000000..3dea4e1c
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/sample_threadx.ld
@@ -0,0 +1,239 @@
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+/* ENTRY(_start) */
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ . = 0x00000000;
+
+ .vectors : {reset.o(.text) }
+
+ /* Read-only sections, merged into text segment: */
+ . = 0x00001000;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text :
+ {
+ *(.rel.text)
+ *(.rel.text.*)
+ *(.rel.gnu.linkonce.t*)
+ }
+ .rela.text :
+ {
+ *(.rela.text)
+ *(.rela.text.*)
+ *(.rela.gnu.linkonce.t*)
+ }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata :
+ {
+ *(.rel.rodata)
+ *(.rel.rodata.*)
+ *(.rel.gnu.linkonce.r*)
+ }
+ .rela.rodata :
+ {
+ *(.rela.rodata)
+ *(.rela.rodata.*)
+ *(.rela.gnu.linkonce.r*)
+ }
+ .rel.data :
+ {
+ *(.rel.data)
+ *(.rel.data.*)
+ *(.rel.gnu.linkonce.d*)
+ }
+ .rela.data :
+ {
+ *(.rela.data)
+ *(.rela.data.*)
+ *(.rela.gnu.linkonce.d*)
+ }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.sdata :
+ {
+ *(.rel.sdata)
+ *(.rel.sdata.*)
+ *(.rel.gnu.linkonce.s*)
+ }
+ .rela.sdata :
+ {
+ *(.rela.sdata)
+ *(.rela.sdata.*)
+ *(.rela.gnu.linkonce.s*)
+ }
+ .rel.sbss : { *(.rel.sbss) }
+ .rela.sbss : { *(.rela.sbss) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text)
+ *(.text.*)
+ *(.stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.gnu.linkonce.t*)
+ *(.glue_7t) *(.glue_7)
+ } =0
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ _etext = .;
+ PROVIDE (etext = .);
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ .rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN(256) + (. & (256 - 1));
+ .data :
+ {
+ *(.data)
+ *(.data.*)
+ *(.gnu.linkonce.d*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
+ .gcc_except_table : { *(.gcc_except_table) }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ /* We don't want to include the .ctor section from
+ from the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata)
+ *(.sdata.*)
+ *(.gnu.linkonce.s.*)
+ }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ __bss_start__ = .;
+ .sbss :
+ {
+ *(.dynsbss)
+ *(.sbss)
+ *(.sbss.*)
+ *(.scommon)
+ }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ . = ALIGN(32 / 8);
+
+ _bss_end__ = . ; __bss_end__ = . ;
+ PROVIDE (end = .);
+
+ .stack :
+ {
+
+ _stack_bottom = ABSOLUTE(.) ;
+
+ /* Allocate room for stack. This must be big enough for the IRQ, FIQ, and
+ SYS stack if nested interrupts are enabled. */
+ . = ALIGN(8) ;
+ . += 4096 ;
+ _sp = . - 16 ;
+ _stack_top = ABSOLUTE(.) ;
+ }
+
+ _end = .; __end__ = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+
+ /* These must appear regardless of . */
+}
diff --git a/ports/cortex_a12/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a12/gnu/example_build/tx_initialize_low_level.S
new file mode 100644
index 00000000..7de5d3ce
--- /dev/null
+++ b/ports/cortex_a12/gnu/example_build/tx_initialize_low_level.S
@@ -0,0 +1,305 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
+ .global _tx_thread_system_stack_ptr
+ .global _tx_initialize_unused_memory
+ .global _tx_thread_context_save
+ .global _tx_thread_context_restore
+ .global _tx_timer_interrupt
+ .global _end
+ .global _sp
+ .global _stack_bottom
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_initialize_low_level
+ .type $_tx_initialize_low_level,function
+$_tx_initialize_low_level:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level,function
+_tx_initialize_low_level:
+
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+#endif
+
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+/* Define shells for each of the interrupt vectors. */
+
+ .global __tx_undefined
+__tx_undefined:
+ B __tx_undefined // Undefined handler
+
+ .global __tx_swi_interrupt
+__tx_swi_interrupt:
+ B __tx_swi_interrupt // Software interrupt handler
+
+ .global __tx_prefetch_handler
+__tx_prefetch_handler:
+ B __tx_prefetch_handler // Prefetch exception handler
+
+ .global __tx_abort_handler
+__tx_abort_handler:
+ B __tx_abort_handler // Abort exception handler
+
+ .global __tx_reserved_handler
+__tx_reserved_handler:
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_handler
+ .global __tx_irq_processing_return
+__tx_irq_handler:
+
+ /* Jump to context save to save system context. */
+ B _tx_thread_context_save
+__tx_irq_processing_return:
+//
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_start
+#endif
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns to processing in IRQ mode with interrupts disabled. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_end
+#endif
+
+ /* Jump to context restore to restore system context. */
+ B _tx_thread_context_restore
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns to processing in IRQ mode with interrupts disabled. */
+
+
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ .global __tx_fiq_handler
+ .global __tx_fiq_processing_return
+__tx_fiq_handler:
+
+ /* Jump to fiq context save to save system context. */
+ B _tx_thread_fiq_context_save
+__tx_fiq_processing_return:
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_start
+#endif
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_end
+#endif
+
+ /* Jump to fiq context restore to restore system context. */
+ B _tx_thread_fiq_context_restore
+
+
+#else
+ .global __tx_fiq_handler
+__tx_fiq_handler:
+ B __tx_fiq_handler // FIQ interrupt handler
+#endif
+
+
+BUILD_OPTIONS:
+ .word _tx_build_options // Reference to bring in
+VERSION_ID:
+ .word _tx_version_id // Reference to bring in
+
+
+
diff --git a/ports/cortex_a12/gnu/inc/tx_port.h b/ports/cortex_a12/gnu/inc/tx_port.h
new file mode 100644
index 00000000..19463de1
--- /dev/null
+++ b/ports/cortex_a12/gnu/inc/tx_port.h
@@ -0,0 +1,328 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned long ULONG;
+typedef short SHORT;
+typedef unsigned short USHORT;
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#else
+#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
+#endif
+#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#define TX_INLINE_INITIALIZATION
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#if __TARGET_ARCH_ARM > 4
+
+#ifndef __thumb__
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
+ asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
+ b = 31 - b;
+#endif
+#endif
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifdef __thumb__
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+
+#else
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save, tx_temp;
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID if ": "=r" (interrupt_save) );
+#else
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID i ": "=r" (interrupt_save) );
+#endif
+
+#define TX_RESTORE asm volatile (" MSR CPSR_c,%0 "::"r" (interrupt_save) );
+
+#endif
+
+
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
+ thread. */
+
+void tx_thread_vfp_enable(void);
+void tx_thread_vfp_disable(void);
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
+
diff --git a/ports/cortex_a12/gnu/src/tx_thread_context_restore.S b/ports/cortex_a12/gnu/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..fae7e72d
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_context_restore.S
@@ -0,0 +1,222 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
+#else
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
+#endif
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore,function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
+_tx_skip_irq_vfp_save:
+
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a12/gnu/src/tx_thread_context_save.S b/ports/cortex_a12/gnu/src/tx_thread_context_save.S
new file mode 100644
index 00000000..7ac48c2e
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_context_save.S
@@ -0,0 +1,172 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save,function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a12/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a12/gnu/src/tx_thread_fiq_context_restore.S
new file mode 100644
index 00000000..006be973
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_fiq_context_restore.S
@@ -0,0 +1,223 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_system_stack_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_restore
+ .type _tx_thread_fiq_context_restore,function
+_tx_thread_fiq_context_restore:
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                                    // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
+
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_fiq_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+_tx_skip_fiq_vfp_save:
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                                    // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
+
diff --git a/ports/cortex_a12/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a12/gnu/src/tx_thread_fiq_context_save.S
new file mode 100644
index 00000000..7db6a4c2
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_fiq_context_save.S
@@ -0,0 +1,178 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_fiq_processing_return
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_save
+ .type _tx_thread_fiq_context_save,function
+_tx_thread_fiq_context_save:
+
+ /* Upon entry to this routine, it is assumed that interrupts are locked
+ out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
+__tx_thread_fiq_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+
+__tx_thread_fiq_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_end.S
new file mode 100644
index 00000000..b34d881e
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_end.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_end
+ .type _tx_thread_fiq_nesting_end,function
+_tx_thread_fiq_nesting_end:
+ MOV r3,lr // Save ISR return address (lr is banked per mode)
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
+
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_start.S
new file mode 100644
index 00000000..c9cd5a06
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_fiq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_start
+ .type _tx_thread_fiq_nesting_start,function
+_tx_thread_fiq_nesting_start:
+ MOV r3,lr // Save ISR return address (lr is banked per mode)
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a12/gnu/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..63b1609a
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_interrupt_control.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+INT_MASK = 0x03F
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_control
+$_tx_thread_interrupt_control:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // Padding (BX pc branches past this NOP)
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control,function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Keep masked bits, clearing interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a12/gnu/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..13258808
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,101 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_disable
+$_tx_thread_interrupt_disable:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // Padding (BX pc branches past this NOP)
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable,function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR (returned as old posture)
+
+ /* Mask interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ
+#else
+ CPSID i // Disable IRQ
+#endif
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a12/gnu/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..2d582511
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,93 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_restore
+$_tx_thread_interrupt_restore:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // Padding (BX pc branches past this NOP)
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore,function
+_tx_thread_interrupt_restore:
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR from the saved posture
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_end.S
new file mode 100644
index 00000000..ec7e63c6
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_end.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_end
+ .type _tx_thread_irq_nesting_end,function
+_tx_thread_irq_nesting_end:
+ MOV r3,lr // Save ISR return address (lr is banked per mode)
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_start.S
new file mode 100644
index 00000000..c69976ed
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_irq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_start
+ .type _tx_thread_irq_nesting_start,function
+_tx_thread_irq_nesting_start:
+ MOV r3,lr // Save ISR return address (lr is banked per mode)
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enable IRQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_schedule.S b/ports/cortex_a12/gnu/src/tx_thread_schedule.S
new file mode 100644
index 00000000..8330e9df
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_schedule.S
@@ -0,0 +1,230 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_execute_ptr
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_schedule
+ .type $_tx_thread_schedule,function
+$_tx_thread_schedule:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // Padding (BX pc branches past this NOP)
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule,function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSIE if // Enable IRQ and FIQ interrupts
+#else
+ CPSIE i // Enable IRQ interrupts
+#endif
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+__tx_thread_schedule_loop:
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch to the thread's stack pointer
+ STR r3, [r2] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
+#endif
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
+ BEQ _tx_solicited_return // Yes, solicited (synchronous) return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_interrupt_vfp_restore:
+#endif
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
+
+_tx_solicited_return:
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_solicited_vfp_restore:
+#endif
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+
+ .global tx_thread_vfp_enable
+ .type tx_thread_vfp_enable,function
+tx_thread_vfp_enable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_enable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+ .global tx_thread_vfp_disable
+ .type tx_thread_vfp_disable,function
+tx_thread_vfp_disable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_disable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_stack_build.S b/ports/cortex_a12/gnu/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..f413e673
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_stack_build.S
@@ -0,0 +1,164 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+ .arm
+
+SVC_MODE = 0x13 // SVC mode
+#ifdef TX_ENABLE_FIQ_SUPPORT
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
+#else
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
+#endif
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_thread_stack_build
+ .type $_tx_thread_stack_build,function
+$_tx_thread_stack_build:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build,function
+_tx_thread_stack_build:
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a12/gnu/src/tx_thread_system_return.S b/ports/cortex_a12/gnu/src/tx_thread_system_return.S
new file mode 100644
index 00000000..cb7d62ce
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_system_return.S
@@ -0,0 +1,162 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_system_return
+ .type $_tx_thread_system_return,function
+$_tx_thread_system_return:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return,function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
+_tx_skip_solicited_vfp_save:
+#endif
+
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a12/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a12/gnu/src/tx_thread_vectored_context_save.S
new file mode 100644
index 00000000..d846223f
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_thread_vectored_context_save.S
@@ -0,0 +1,165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_vectored_context_save
+ .type _tx_thread_vectored_context_save,function
+_tx_thread_vectored_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a12/gnu/src/tx_timer_interrupt.S b/ports/cortex_a12/gnu/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..7337ed0c
--- /dev/null
+++ b/ports/cortex_a12/gnu/src/tx_timer_interrupt.S
@@ -0,0 +1,231 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+/* Define Assembly language external references... */
+
+ .global _tx_timer_time_slice
+ .global _tx_timer_system_clock
+ .global _tx_timer_current_ptr
+ .global _tx_timer_list_start
+ .global _tx_timer_list_end
+ .global _tx_timer_expired_time_slice
+ .global _tx_timer_expired
+ .global _tx_thread_time_slice
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_timer_interrupt
+ .type $_tx_timer_interrupt,function
+$_tx_timer_interrupt:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt,function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx.c b/ports/cortex_a15/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
 below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
 below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a15/ac6/example_build/sample_threadx/.cproject
index b9c92970..9a96fceb 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/.cproject
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,41 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
-
-
-
-
+
+
+
-
+
-
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
+
+
+
+
+
+
@@ -137,7 +139,7 @@
-
+
@@ -168,5 +170,7 @@
+
+
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/.project b/ports/cortex_a15/ac6/example_build/sample_threadx/.project
index 2a6b3cb1..ed4c0885 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/.project
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/.project
@@ -20,7 +20,6 @@
- com.arm.debug.ds.natureorg.eclipse.cdt.core.cnatureorg.eclipse.cdt.managedbuilder.core.managedBuildNatureorg.eclipse.cdt.managedbuilder.core.ScannerConfigNature
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..016eebe6
--- /dev/null
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.scat
index 8e648890..d23881cd 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.scat
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -1,47 +1,44 @@
-;**************************************************
-; Copyright (c) 2011 Arm Limited (or its affiliates). All rights reserved.
+;*******************************************************
+; Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
; Use, modification and redistribution of this file is subject to your possession of a
; valid End User License Agreement for the Arm Product of which these examples are part of
; and your compliance with all applicable terms and conditions of such licence agreement.
-;**************************************************
+;*******************************************************
-; Scatter-file for bare-metal example on Versatile Express
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
-; This scatter-file places application code, data, stack and heap at suitable addresses in the Versatile Express Cortex-A15 Core memory map.
-
-; Versatile Express Cortex-A15 Core has SDRAM at 0x80000000, which this scatter-file uses.
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
-SDRAM 0x80000000
+SDRAM 0x80000000 0x20000000
{
VECTORS +0
{
* (VECTORS, +FIRST) ; Vector table and other (assembler) startup code
* (InRoot$$Sections) ; All (library) code that must be in a root region
}
-
- RO_CODE +0
- { * (+RO-CODE) } ; Application RO code (.text)
- RO_DATA +0
- { * (+RO-DATA) } ; Application RO data (.constdata)
+ RO_CODE +0
+ { * (+RO-CODE) } ; Application RO code (.text)
- RW_DATA +0
- { * (+RW) } ; Application RW data (.data)
+ RO_DATA +0
+ { * (+RO-DATA) } ; Application RO data (.constdata)
- ZI_DATA +0
- { * (+ZI) } ; Application ZI data (.bss)
+ RW_DATA +0
+ { * (+RW) } ; Application RW data (.data)
- ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
- { }
+ ZI_DATA +0
+ { * (+ZI) } ; Application ZI data (.bss)
- ARM_LIB_STACK 0x80090000 EMPTY -0x00010000 ; Application (SVC mode) stack
- { }
+ ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
+ { }
- ;IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
- ;{ }
+ ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
+ { }
- TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
- { }
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
+ TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
+ { }
}
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a15/ac6/example_build/sample_threadx/startup.S
index 8bfa1982..670fadb9 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/startup.S
@@ -1,31 +1,31 @@
//----------------------------------------------------------------
-// Cortex-A15 Embedded example - Startup Code
+// ARMv7-A Embedded example - Startup Code
//
// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//----------------------------------------------------------------
-
// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
-#define Mode_USR 0x10
-#define Mode_FIQ 0x11
-#define Mode_IRQ 0x12
-#define Mode_SVC 0x13
-#define Mode_ABT 0x17
-#define Mode_UND 0x1B
-#define Mode_SYS 0x1F
+#define Mode_USR 0x10
+#define Mode_FIQ 0x11
+#define Mode_IRQ 0x12
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define Mode_SYS 0x1F
-#define I_Bit 0x80 // When I bit is set, IRQ is disabled
-#define F_Bit 0x40 // When F bit is set, FIQ is disabled
+#define I_Bit 0x80 // When I bit is set, IRQ is disabled
+#define F_Bit 0x40 // When F bit is set, FIQ is disabled
.section VECTORS, "ax"
.align 3
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
//----------------------------------------------------------------
// Entry point for the Reset handler
//----------------------------------------------------------------
@@ -48,29 +48,23 @@ Vectors:
LDR PC, IRQ_Addr
LDR PC, FIQ_Addr
+
.balign 4
Reset_Addr:
.word Reset_Handler
Undefined_Addr:
- //.word Undefined_Handler
.word __tx_undefined
SVC_Addr:
- //.word SVC_Handler
.word __tx_swi_interrupt
Prefetch_Addr:
- //.word Prefetch_Handler
.word __tx_prefetch_handler
Abort_Addr:
- //.word Abort_Handler
.word __tx_abort_handler
Hypervisor_Addr:
- //.word Hypervisor_Handler
.word __tx_reserved_handler
IRQ_Addr:
- //.word IRQ_Handler
.word __tx_irq_handler
FIQ_Addr:
- //.word FIQ_Handler
.word __tx_fiq_handler
@@ -100,26 +94,24 @@ FIQ_Handler:
Reset_Handler:
//----------------------------------------------------------------
-// Disable caches, MMU and branch prediction in case they were left enabled from an earlier run
+// Disable caches and MMU in case they were left enabled from an earlier run
// This does not need to be done from a cold reset
//----------------------------------------------------------------
- MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
- BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
- BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
- BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
- BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
- MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
- ISB
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
-// The MMU is enabled later, before calling main(). Caches and branch prediction are enabled inside main(),
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
// after the MMU has been enabled and scatterloading has been performed.
//----------------------------------------------------------------
-// ACTLR.SMP bit must be set before the caches and MMU are enabled,
-// or any cache and TLB maintenance operations are performed, even for "AMP" CPUs.
-// In the Cortex-A15 processor, the L1 data cache and L2 cache are always coherent,
-// for shared or non-shared data, regardless of the value of the SMP bit.
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
//----------------------------------------------------------------
MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
@@ -127,7 +119,7 @@ Reset_Handler:
ISB
//----------------------------------------------------------------
-// Invalidate Data and Instruction TLBs and branch predictor in case they were left enabled from an earlier run
+// Invalidate Data and Instruction TLBs and branch predictor
// This does not need to be done from a cold reset
//----------------------------------------------------------------
@@ -170,8 +162,8 @@ notA15r0p0:
MCR p15, 0, r0, c12, c0, 0
//----------------------------------------------------------------
-// Cache Invalidation code for Cortex-A15
-// NOTE: Neither Caches, nor MMU, nor BTB need post-reset invalidation on Cortex-A15,
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
//----------------------------------------------------------------
@@ -190,7 +182,8 @@ notA15r0p0:
BEQ Finished // If 0, no need to clean
MOV r10, #0 // R10 holds current cache level << 1
-Loop1: ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
+Loop1:
+ ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
MOV r1, r0, LSR r2 // Bottom 3 bits are the Cache-type for this level
AND r1, r1, #7 // Isolate those lower 3 bits
CMP r1, #2
@@ -207,16 +200,19 @@ Loop1: ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
LDR r7, =0x7FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
-Loop2: MOV r9, r4 // R9 working copy of the max way size (right aligned)
+Loop2:
+ MOV r9, r4 // R9 working copy of the max way size (right aligned)
-Loop3: ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
+Loop3:
+ ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
ORR r11, r11, r7, LSL r2 // Factor in the Set number
MCR p15, 0, r11, c7, c6, 2 // Invalidate by Set/Way
SUBS r9, r9, #1 // Decrement the Way number
BGE Loop3
SUBS r7, r7, #1 // Decrement the Set number
BGE Loop2
-Skip: ADD r10, r10, #2 // Increment the cache number
+Skip:
+ ADD r10, r10, #2 // Increment the cache number
CMP r3, r10
BGT Loop1
@@ -237,11 +233,13 @@ Finished:
// write the address of our page table base to TTB register 0
LDR r0,=Image$$TTB$$ZI$$Base
+
MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
// S=0 (translation table walk to non-shared memory)
ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
ORR r0,r0,r1
+
MCR p15, 0, r0, c2, c0, 0
@@ -259,7 +257,7 @@ Finished:
// Bits[31:20] - Top 12 bits of VA is pointer into table
// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
// S[16]=0 - Indicates normal memory is shared when set.
-// AP2[15]=0
+// AP2[15]=0
// AP[11:10]=11 - Configure for full read/write access in all modes
// TEX[14:12]=000
// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
@@ -269,32 +267,32 @@ Finished:
// XN[4]=1 - Execute never on Strongly-ordered memory
// Bits[1:0]=10 - Indicate entry is a 1MB section
//----------------------------------------------------------------
- LDR r0, =Image$$TTB$$ZI$$Base
+ LDR r0,=Image$$TTB$$ZI$$Base
LDR r1,=0xfff // loop counter
- LDR r2,=3554
+ LDR r2,=0b00000000000000000000110111100010
// r0 contains the address of the translation table base
// r1 is loop counter
// r2 is level1 descriptor (bits 19:0)
// use loop counter to create 4096 individual table entries.
- // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
// offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
init_ttb_1:
ORR r3, r2, r1, LSL#20 // R3 now contains full level1 descriptor to write
- ORR r3, r3, #16 // Set XN bit
+ ORR r3, r3, #0b0000000010000 // Set XN bit
STR r3, [r0, r1, LSL#2] // Str table entry at TTB base + loopcount*4
SUBS r1, r1, #1 // Decrement loop counter
BPL init_ttb_1
- // In this example, the 1MB section based at 'Image$$VECTORS$$Base' is setup specially as cacheable (write back mode).
+ // In this example, the 1MB section based at '__code_start' is setup specially as cacheable (write back mode).
// TEX[14:12]=001 and CB[3:2]= 11, Outer and inner write back, write allocate normal memory.
- LDR r1, =Image$$VECTORS$$Base // Base physical address of code segment
+ LDR r1,=Image$$VECTORS$$Base // Base physical address of code segment
LSR r1, #20 // Shift right to align to 1MB boundaries
ORR r3, r2, r1, LSL#20 // Setup the initial level1 descriptor again
- ORR r3, r3, #12 // Set CB bits
- ORR r3, r3, #4096 // Set TEX bit 12
+ ORR r3, r3, #0b0000000001100 // Set CB bits
+ ORR r3, r3, #0b1000000000000 // Set TEX bit 12
STR r3, [r0, r1, LSL#2] // str table entry
//----------------------------------------------------------------
@@ -306,7 +304,6 @@ init_ttb_1:
MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
#if defined(__ARM_NEON) || defined(__ARM_FP)
-
//----------------------------------------------------------------
// Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
// Enables Full Access i.e. in both privileged and non privileged modes
@@ -325,15 +322,14 @@ init_ttb_1:
VMSR FPEXC, r0 // Write FPEXC register, EN bit set
#endif
-
+
//----------------------------------------------------------------
// Enable MMU and branch to __main
// Leaving the caches disabled until after scatter loading.
//----------------------------------------------------------------
- LDR r12,=__main // Save this in register for possible long jump
+ LDR r12,=__main
-#if 0
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
@@ -341,7 +337,6 @@ init_ttb_1:
ORR r0, r0, #0x1 // Set M bit 0 to enable MMU before scatter loading
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-#endif
// Now the MMU is enabled, virtual to physical address translations will occur. This will affect the next
// instruction fetch.
@@ -350,7 +345,7 @@ init_ttb_1:
// The branch to __main is safe because the Virtual Address (VA) is the same as the Physical Address (PA)
// (flat mapping) of this code that enables the MMU and performs the branch
- BX r12 // Branch to __main C library entry point
+ BX r12 // Branch to __main C library entry point
@@ -378,7 +373,25 @@ enable_caches:
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
- BX lr
+ MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+ ORR r0, #2 // L2EN bit, enable L2 cache
+ ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
+ MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
+ ISB
+ BX lr
.cfi_endproc
+ .global disable_caches
+ .type disable_caches, "function"
+disable_caches:
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ BX lr
+
+
diff --git a/ports/cortex_a15/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a15/ac6/example_build/sample_threadx/tx_initialize_low_level.S
index 7ea1cf8c..715958f0 100644
--- a/ports/cortex_a15/ac6/example_build/sample_threadx/tx_initialize_low_level.S
+++ b/ports/cortex_a15/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -1,345 +1,299 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+ applications calling this function from to 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =Image$$ARM_LIB_STACK$$Base @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =Image$$ZI_DATA$$ZI$$Limit @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
- .global __tx_irq_processing_return
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
.type __tx_irq_processing_return,function
- .global __tx_irq_handler
+ .global __tx_irq_handler
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* Jump to context restore to restore system context. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
-
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a15/ac6/example_build/tx/.cproject b/ports/cortex_a15/ac6/example_build/tx/.cproject
index 45b4676d..e8ca6dd7 100644
--- a/ports/cortex_a15/ac6/example_build/tx/.cproject
+++ b/ports/cortex_a15/ac6/example_build/tx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,37 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -111,7 +111,7 @@
-
+
@@ -123,6 +123,10 @@
+
+
+
+
@@ -138,9 +142,5 @@
-
-
-
-
diff --git a/ports/cortex_a15/ac6/inc/tx_port.h b/ports/cortex_a15/ac6/inc/tx_port.h
index 5485dfa2..19463de1 100644
--- a/ports/cortex_a15/ac6/inc/tx_port.h
+++ b/ports/cortex_a15/ac6/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A15/AC6 */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A15. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A15/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a15/ac6/src/tx_thread_context_restore.S b/ports/cortex_a15/ac6/src/tx_thread_context_restore.S
index cbc1b316..fae7e72d 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_context_restore.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_context_restore.S
@@ -1,259 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a15/ac6/src/tx_thread_context_save.S b/ports/cortex_a15/ac6/src/tx_thread_context_save.S
index 82f63954..7ac48c2e 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_context_save.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_context_save.S
@@ -1,205 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_irq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a15/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a15/ac6/src/tx_thread_fiq_context_restore.S
index efa3ec3e..006be973 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -45,218 +34,190 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                                    // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                                    // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a15/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a15/ac6/src/tx_thread_fiq_context_save.S
index b45748c0..7db6a4c2 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_fiq_context_save.S
@@ -1,206 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_end.S
index 2cf49fc3..b34d881e 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_start.S
index c26e173f..c9cd5a06 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a15/ac6/src/tx_thread_interrupt_control.S
index 6e7b9dee..63b1609a 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from to 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a15/ac6/src/tx_thread_interrupt_disable.S
index f0833f65..13258808 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from to 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a15/ac6/src/tx_thread_interrupt_restore.S
index 0f0a4b06..2d582511 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_end.S
index 40a9c386..ec7e63c6 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_start.S
index 9a6d4370..c69976ed 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_schedule.S b/ports/cortex_a15/ac6/src/tx_thread_schedule.S
index 9fabd3b3..8330e9df 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_schedule.S
@@ -1,257 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_stack_build.S b/ports/cortex_a15/ac6/src/tx_thread_stack_build.S
index df648838..f413e673 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_stack_build.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A15 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+       pc (r15)        Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a15/ac6/src/tx_thread_system_return.S b/ports/cortex_a15/ac6/src/tx_thread_system_return.S
index 11b376a7..cb7d62ce 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_system_return.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_system_return.S
@@ -1,182 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a15/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a15/ac6/src/tx_thread_vectored_context_save.S
index 5981286c..d846223f 100644
--- a/ports/cortex_a15/ac6/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a15/ac6/src/tx_thread_vectored_context_save.S
@@ -1,192 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A15/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a15/ac6/src/tx_timer_interrupt.S b/ports/cortex_a15/ac6/src/tx_timer_interrupt.S
index 73285451..7337ed0c 100644
--- a/ports/cortex_a15/ac6/src/tx_timer_interrupt.S
+++ b/ports/cortex_a15/ac6/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A15/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a15/gnu/example_build/build_threadx_sample.bat
index af64fc3b..0d5617a1 100644
--- a/ports/cortex_a15/gnu/example_build/build_threadx_sample.bat
+++ b/ports/cortex_a15/gnu/example_build/build_threadx_sample.bat
@@ -2,5 +2,5 @@ arm-none-eabi-gcc -c -g -mcpu=cortex-a15 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a15 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a15 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a15 -I../../../../common/inc -I../inc sample_threadx.c
-arm-none-eabi-ld -A cortex-a15 -T sample_threadx.ld reset.o crt0.o tx_initialize_low_level.o sample_threadx.o tx.a libc.a libgcc.a -o sample_threadx.out -M > sample_threadx.map
+arm-none-eabi-gcc -g -mcpu=cortex-a15 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
diff --git a/ports/cortex_a15/gnu/example_build/crt0.S b/ports/cortex_a15/gnu/example_build/crt0.S
index aa0f3239..56b6c958 100644
--- a/ports/cortex_a15/gnu/example_build/crt0.S
+++ b/ports/cortex_a15/gnu/example_build/crt0.S
@@ -26,13 +26,13 @@ _mainCRTStartup:
mov a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
-
- ldr a1, .LC1 /* First arg: start of memory block */
- ldr a3, .LC2
- sub a3, a3, a1 /* Third arg: length of block */
-
-
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
bl memset
mov r0, #0 /* no arguments */
mov r1, #0 /* no argv either */
@@ -48,15 +48,15 @@ _mainCRTStartup:
/* bl init */
mov r0, r4
mov r1, r5
-#endif
+#endif
bl main
bl exit /* Should not return. */
-
- /* For Thumb, constants must be after the code since only
+
+ /* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
-
+
.align 0
.LC0:
.LC1:
diff --git a/ports/cortex_a15/gnu/example_build/reset.S b/ports/cortex_a15/gnu/example_build/reset.S
index 856e31eb..597e9d9a 100644
--- a/ports/cortex_a15/gnu/example_build/reset.S
+++ b/ports/cortex_a15/gnu/example_build/reset.S
@@ -1,35 +1,24 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
@@ -41,36 +30,35 @@
.global __tx_reserved_handler
.global __tx_irq_handler
.global __tx_fiq_handler
-@
-@
-@/* Define the vector area. This should be located or copied to 0. */
-@
+
+/* Define the vector area. This should be located or copied to 0. */
+
.text
.global __vectors
__vectors:
- LDR pc, STARTUP @ Reset goes to startup function
- LDR pc, UNDEFINED @ Undefined handler
- LDR pc, SWI @ Software interrupt handler
- LDR pc, PREFETCH @ Prefetch exception handler
- LDR pc, ABORT @ Abort exception handler
- LDR pc, RESERVED @ Reserved exception handler
- LDR pc, IRQ @ IRQ interrupt handler
- LDR pc, FIQ @ FIQ interrupt handler
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
STARTUP:
- .word _start @ Reset goes to C startup function
+ .word _start // Reset goes to C startup function
UNDEFINED:
- .word __tx_undefined @ Undefined handler
+ .word __tx_undefined // Undefined handler
SWI:
- .word __tx_swi_interrupt @ Software interrupt handler
+ .word __tx_swi_interrupt // Software interrupt handler
PREFETCH:
- .word __tx_prefetch_handler @ Prefetch exception handler
-ABORT:
- .word __tx_abort_handler @ Abort exception handler
-RESERVED:
- .word __tx_reserved_handler @ Reserved exception handler
-IRQ:
- .word __tx_irq_handler @ IRQ interrupt handler
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
FIQ:
- .word __tx_fiq_handler @ FIQ interrupt handler
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a15/gnu/example_build/sample_threadx.c b/ports/cortex_a15/gnu/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a15/gnu/example_build/sample_threadx.c
+++ b/ports/cortex_a15/gnu/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a15/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a15/gnu/example_build/tx_initialize_low_level.S
index bf53179c..7de5d3ce 100644
--- a/ports/cortex_a15/gnu/example_build/tx_initialize_low_level.S
+++ b/ports/cortex_a15/gnu/example_build/tx_initialize_low_level.S
@@ -1,47 +1,35 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
@@ -51,297 +39,267 @@ SYS_STACK_SIZE = 1024 @ System stack size
.global _sp
.global _stack_bottom
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =_sp @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =_stack_bottom @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =_end @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_handler
- .global __tx_irq_processing_return
+ .global __tx_irq_processing_return
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns processing in IRQ mode with interrupts disabled. */
+
+
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a15/gnu/inc/tx_port.h b/ports/cortex_a15/gnu/inc/tx_port.h
index edd17889..19463de1 100644
--- a/ports/cortex_a15/gnu/inc/tx_port.h
+++ b/ports/cortex_a15/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A15/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A15. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A15/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a15/gnu/src/tx_thread_context_restore.S b/ports/cortex_a15/gnu/src/tx_thread_context_restore.S
index 3bf26a20..fae7e72d 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_context_restore.S
@@ -1,260 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
- .global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+    /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a15/gnu/src/tx_thread_context_save.S b/ports/cortex_a15/gnu/src/tx_thread_context_save.S
index 6f75ade7..7ac48c2e 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_context_save.S
@@ -1,206 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
- .global _tx_irq_processing_return
- .global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a15/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a15/gnu/src/tx_thread_fiq_context_restore.S
index 945d720a..006be973 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -46,218 +35,189 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                                // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                                // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a15/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a15/gnu/src/tx_thread_fiq_context_save.S
index f036bf85..7db6a4c2 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_fiq_context_save.S
@@ -1,207 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
.global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_end.S
index 7ddb0f7f..b34d881e 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+    ORR     r0, r0, #FIQ_MODE_BITS              // Build FIQ mode CPSR
+    MSR     CPSR_c, r0                          // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_start.S
index a2f0471e..c9cd5a06 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+    BIC     r0, r0, #FIQ_DISABLE                // Build enable FIQ CPSR
+    MSR     CPSR_c, r0                          // Write CPSR to enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a15/gnu/src/tx_thread_interrupt_control.S
index bb919207..63b1609a 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a15/gnu/src/tx_thread_interrupt_disable.S
index 549423c5..13258808 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a15/gnu/src/tx_thread_interrupt_restore.S
index 0f5a7b0a..2d582511 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_end.S
index 0ae02793..ec7e63c6 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called from 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_start.S
index 222d348f..c69976ed 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called from 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+    MSR     CPSR_c, r0                      // Enable IRQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_schedule.S b/ports/cortex_a15/gnu/src/tx_thread_schedule.S
index 5e62803e..8330e9df 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_schedule.S
@@ -1,258 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
- .global _tx_execution_thread_enter
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_stack_build.S b/ports/cortex_a15/gnu/src/tx_thread_stack_build.S
index cad38249..f413e673 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A15 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a15/gnu/src/tx_thread_system_return.S b/ports/cortex_a15/gnu/src/tx_thread_system_return.S
index 7161edca..cb7d62ce 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_system_return.S
@@ -1,183 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
- .global _tx_execution_thread_exit
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a15/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a15/gnu/src/tx_thread_vectored_context_save.S
index 4d4b1ce8..d846223f 100644
--- a/ports/cortex_a15/gnu/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a15/gnu/src/tx_thread_vectored_context_save.S
@@ -1,193 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_execution_isr_enter
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A15/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a15/gnu/src/tx_timer_interrupt.S b/ports/cortex_a15/gnu/src/tx_timer_interrupt.S
index 2b2fc012..7337ed0c 100644
--- a/ports/cortex_a15/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a15/gnu/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A15/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+    LDMIA   sp!, {r0, lr}                   // Recover lr register (r0 is just there for
+                                            // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx.c b/ports/cortex_a17/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a17/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..6eef9a7b
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,176 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/.project b/ports/cortex_a17/ac6/example_build/sample_threadx/.project
new file mode 100644
index 00000000..ed4c0885
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/.project
@@ -0,0 +1,27 @@
+
+
+ sample_threadx
+
+
+ tx
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+ full,incremental,
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..4e7e09b1
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..d23881cd
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,44 @@
+;*******************************************************
+; Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;*******************************************************
+
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
+
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
+
+
+SDRAM 0x80000000 0x20000000
+{
+ VECTORS +0
+ {
+ * (VECTORS, +FIRST) ; Vector table and other (assembler) startup code
+ * (InRoot$$Sections) ; All (library) code that must be in a root region
+ }
+
+ RO_CODE +0
+ { * (+RO-CODE) } ; Application RO code (.text)
+
+ RO_DATA +0
+ { * (+RO-DATA) } ; Application RO data (.constdata)
+
+ RW_DATA +0
+ { * (+RW) } ; Application RW data (.data)
+
+ ZI_DATA +0
+ { * (+ZI) } ; Application ZI data (.bss)
+
+ ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
+ { }
+
+ ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
+ { }
+
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
+
+ TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
+ { }
+}
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a17/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..670fadb9
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,397 @@
+//----------------------------------------------------------------
+// ARMv7-A Embedded example - Startup Code
+//
+// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//----------------------------------------------------------------
+
+// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
+
+#define Mode_USR 0x10
+#define Mode_FIQ 0x11
+#define Mode_IRQ 0x12
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define Mode_SYS 0x1F
+
+#define I_Bit 0x80 // When I bit is set, IRQ is disabled
+#define F_Bit 0x40 // When F bit is set, FIQ is disabled
+
+
+ .section VECTORS, "ax"
+ .align 3
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+//----------------------------------------------------------------
+// Entry point for the Reset handler
+//----------------------------------------------------------------
+
+ .global Vectors
+
+//----------------------------------------------------------------
+// Exception Vector Table
+//----------------------------------------------------------------
+// Note: LDR PC instructions are used here, though branch (B) instructions
+// could also be used, unless the exception handlers are >32MB away.
+
+Vectors:
+ LDR PC, Reset_Addr
+ LDR PC, Undefined_Addr
+ LDR PC, SVC_Addr
+ LDR PC, Prefetch_Addr
+ LDR PC, Abort_Addr
+ LDR PC, Hypervisor_Addr
+ LDR PC, IRQ_Addr
+ LDR PC, FIQ_Addr
+
+
+ .balign 4
+Reset_Addr:
+ .word Reset_Handler
+Undefined_Addr:
+ .word __tx_undefined
+SVC_Addr:
+ .word __tx_swi_interrupt
+Prefetch_Addr:
+ .word __tx_prefetch_handler
+Abort_Addr:
+ .word __tx_abort_handler
+Hypervisor_Addr:
+ .word __tx_reserved_handler
+IRQ_Addr:
+ .word __tx_irq_handler
+FIQ_Addr:
+ .word __tx_fiq_handler
+
+
+//----------------------------------------------------------------
+// Exception Handlers
+//----------------------------------------------------------------
+
+Undefined_Handler:
+ B Undefined_Handler
+SVC_Handler:
+ B SVC_Handler
+Prefetch_Handler:
+ B Prefetch_Handler
+Abort_Handler:
+ B Abort_Handler
+Hypervisor_Handler:
+ B Hypervisor_Handler
+IRQ_Handler:
+ B IRQ_Handler
+FIQ_Handler:
+ B FIQ_Handler
+
+
+//----------------------------------------------------------------
+// Reset Handler
+//----------------------------------------------------------------
+Reset_Handler:
+
+//----------------------------------------------------------------
+// Disable caches and MMU in case they were left enabled from an earlier run
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
+// after the MMU has been enabled and scatterloading has been performed.
+
+//----------------------------------------------------------------
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
+ ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
+ MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
+ ISB
+
+//----------------------------------------------------------------
+// Invalidate Data and Instruction TLBs and branch predictor
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MOV r0,#0
+ MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
+ MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+
+//----------------------------------------------------------------
+// Initialize Supervisor Mode Stack
+// Note stack must be 8 byte aligned.
+//----------------------------------------------------------------
+
+ LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
+
+//----------------------------------------------------------------
+// Disable loop-buffer to fix errata on A15 r0p0
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+ MOV r1, r0, lsr #4 // Extract Primary Part Number
+ LDR r2, =0xFFF
+ AND r1, r1, r2
+ LDR r2, =0xC0F
+ CMP r1, r2 // Is this an A15?
+ BNE notA15r0p0 // Jump if not A15
+ AND r5, r0, #0x00f00000 // Variant
+ AND r6, r0, #0x0000000f // Revision
+ ORRS r6, r6, r5 // Combine variant and revision
+ BNE notA15r0p0 // Jump if not r0p0
+ MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+ ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+ MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+ ISB
+notA15r0p0:
+
+//----------------------------------------------------------------
+// Set Vector Base Address Register (VBAR) to point to this application's vector table
+//----------------------------------------------------------------
+
+ LDR r0, =Vectors
+ MCR p15, 0, r0, c12, c0, 0
+
+//----------------------------------------------------------------
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
+// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
+//----------------------------------------------------------------
+
+ // Invalidate L1 Instruction Cache
+
+ MRC p15, 1, r0, c0, c0, 1 // Read Cache Level ID Register (CLIDR)
+ TST r0, #0x3 // Harvard Cache?
+ MOV r0, #0 // SBZ
+ MCRNE p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate instruction cache and flush branch target cache
+
+ // Invalidate Data/Unified Caches
+
+ MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
+ ANDS r3, r0, #0x07000000 // Extract coherency level
+ MOV r3, r3, LSR #23 // Total cache levels << 1
+ BEQ Finished // If 0, no need to clean
+
+ MOV r10, #0 // R10 holds current cache level << 1
+Loop1:
+ ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
+ MOV r1, r0, LSR r2 // Bottom 3 bits are the Cache-type for this level
+ AND r1, r1, #7 // Isolate those lower 3 bits
+ CMP r1, #2
+ BLT Skip // No cache or only instruction cache at this level
+
+ MCR p15, 2, r10, c0, c0, 0 // Write the Cache Size selection register
+ ISB // ISB to sync the change to the CacheSizeID reg
+ MRC p15, 1, r1, c0, c0, 0 // Reads current Cache Size ID register
+ AND r2, r1, #7 // Extract the line length field
+ ADD r2, r2, #4 // Add 4 for the line length offset (log2 16 bytes)
+ LDR r4, =0x3FF
+ ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
+ CLZ r5, r4 // R5 is the bit position of the way size increment
+ LDR r7, =0x7FFF
+ ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
+
+Loop2:
+ MOV r9, r4 // R9 working copy of the max way size (right aligned)
+
+Loop3:
+ ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
+ ORR r11, r11, r7, LSL r2 // Factor in the Set number
+ MCR p15, 0, r11, c7, c6, 2 // Invalidate by Set/Way
+ SUBS r9, r9, #1 // Decrement the Way number
+ BGE Loop3
+ SUBS r7, r7, #1 // Decrement the Set number
+ BGE Loop2
+Skip:
+ ADD r10, r10, #2 // Increment the cache number
+ CMP r3, r10
+ BGT Loop1
+
+Finished:
+
+
+//----------------------------------------------------------------
+// MMU Configuration
+// Set translation table base
+//----------------------------------------------------------------
+
+ // Two translation tables are supported, TTBR0 and TTBR1
+ // Configure translation table base (TTB) control register cp15,c2
+ // to a value of all zeros, indicates we are using TTB register 0.
+
+ MOV r0,#0x0
+ MCR p15, 0, r0, c2, c0, 2
+
+ // write the address of our page table base to TTB register 0
+ LDR r0,=Image$$TTB$$ZI$$Base
+
+ MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
+ // S=0 (translation table walk to non-shared memory)
+ ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
+
+ ORR r0,r0,r1
+
+ MCR p15, 0, r0, c2, c0, 0
+
+
+//----------------------------------------------------------------
+// PAGE TABLE generation
+
+// Generate the page tables
+// Build a flat translation table for the whole address space.
+// ie: Create 4096 1MB sections from 0x000xxxxx to 0xFFFxxxxx
+
+
+// 31 20 19 18 17 16 15 14 12 11 10 9 8 5 4 3 2 1 0
+// |section base address| 0 0 |nG| S |AP2| TEX | AP | P | Domain | XN | C B | 1 0|
+//
+// Bits[31:20] - Top 12 bits of VA is pointer into table
+// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
+// S[16]=0 - Indicates normal memory is shared when set.
+// AP2[15]=0
+// AP[11:10]=11 - Configure for full read/write access in all modes
+// TEX[14:12]=000
+// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
+// (except for the code segment descriptor, see below)
+// IMPP[9]=0 - Ignored
+// Domain[8:5]=1111   - Set all pages to use domain 15
+// XN[4]=1 - Execute never on Strongly-ordered memory
+// Bits[1:0]=10 - Indicate entry is a 1MB section
+//----------------------------------------------------------------
+ LDR r0,=Image$$TTB$$ZI$$Base
+ LDR r1,=0xfff // loop counter
+ LDR r2,=0b00000000000000000000110111100010
+
+ // r0 contains the address of the translation table base
+ // r1 is loop counter
+ // r2 is level1 descriptor (bits 19:0)
+
+ // use loop counter to create 4096 individual table entries.
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
+
+init_ttb_1:
+ ORR r3, r2, r1, LSL#20 // R3 now contains full level1 descriptor to write
+ ORR r3, r3, #0b0000000010000 // Set XN bit
+ STR r3, [r0, r1, LSL#2] // Str table entry at TTB base + loopcount*4
+ SUBS r1, r1, #1 // Decrement loop counter
+ BPL init_ttb_1
+
+ // In this example, the 1MB section based at '__code_start' is setup specially as cacheable (write back mode).
+ // TEX[14:12]=001 and CB[3:2]= 11, Outer and inner write back, write allocate normal memory.
+ LDR r1,=Image$$VECTORS$$Base // Base physical address of code segment
+ LSR r1, #20 // Shift right to align to 1MB boundaries
+ ORR r3, r2, r1, LSL#20 // Setup the initial level1 descriptor again
+ ORR r3, r3, #0b0000000001100 // Set CB bits
+ ORR r3, r3, #0b1000000000000 // Set TEX bit 12
+ STR r3, [r0, r1, LSL#2] // str table entry
+
+//----------------------------------------------------------------
+// Setup domain control register - Enable all domains to client mode
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c3, c0, 0 // Read Domain Access Control Register
+ LDR r0, =0x55555555 // Initialize every domain entry to b01 (client)
+ MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
+
+#if defined(__ARM_NEON) || defined(__ARM_FP)
+//----------------------------------------------------------------
+// Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
+// Enables Full Access i.e. in both privileged and non privileged modes
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 2 // Read Coprocessor Access Control Register (CPACR)
+ ORR r0, r0, #(0xF << 20) // Enable access to CP 10 & 11
+ MCR p15, 0, r0, c1, c0, 2 // Write Coprocessor Access Control Register (CPACR)
+ ISB
+
+//----------------------------------------------------------------
+// Switch on the VFP and NEON hardware
+//----------------------------------------------------------------
+
+ MOV r0, #0x40000000
+ VMSR FPEXC, r0 // Write FPEXC register, EN bit set
+#endif
+
+
+//----------------------------------------------------------------
+// Enable MMU and branch to __main
+// Leaving the caches disabled until after scatter loading.
+//----------------------------------------------------------------
+
+ LDR r12,=__main
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ BIC r0, r0, #0x2 // Clear A bit 1 to disable strict alignment fault checking
+ ORR r0, r0, #0x1 // Set M bit 0 to enable MMU before scatter loading
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+// Now the MMU is enabled, virtual to physical address translations will occur. This will affect the next
+// instruction fetch.
+//
+// The two instructions currently in the pipeline will have been fetched before the MMU was enabled.
+// The branch to __main is safe because the Virtual Address (VA) is the same as the Physical Address (PA)
+// (flat mapping) of this code that enables the MMU and performs the branch
+
+ BX r12 // Branch to __main C library entry point
+
+
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+// This code must be run from a privileged mode
+//----------------------------------------------------------------
+
+ .section ENABLECACHES,"ax"
+ .align 3
+
+ .global enable_caches
+ .type enable_caches, "function"
+ .cfi_startproc
+enable_caches:
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+//----------------------------------------------------------------
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ ORR r0, r0, #(0x1 << 12) // Set I bit 12 to enable I Cache
+ ORR r0, r0, #(0x1 << 2) // Set C bit 2 to enable D Cache
+ ORR r0, r0, #(0x1 << 11) // Set Z bit 11 to enable branch prediction
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+ ORR r0, #2 // L2EN bit, enable L2 cache
+ ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
+ MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
+ ISB
+
+ BX lr
+ .cfi_endproc
+
+ .global disable_caches
+ .type disable_caches, "function"
+disable_caches:
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ BX lr
+
+
diff --git a/ports/cortex_a17/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a17/ac6/example_build/sample_threadx/tx_initialize_low_level.S
new file mode 100644
index 00000000..715958f0
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -0,0 +1,299 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
+ .global _tx_thread_system_stack_ptr
+ .global _tx_initialize_unused_memory
+ .global _tx_thread_context_save
+ .global _tx_thread_context_restore
+ .global _tx_timer_interrupt
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_initialize_low_level
+ .type $_tx_initialize_low_level,function
+$_tx_initialize_low_level:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level,function
+_tx_initialize_low_level:
+
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+#endif
+
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+/* Define shells for each of the interrupt vectors. */
+
+ .global __tx_undefined
+__tx_undefined:
+ B __tx_undefined // Undefined handler
+
+ .global __tx_swi_interrupt
+__tx_swi_interrupt:
+ B __tx_swi_interrupt // Software interrupt handler
+
+ .global __tx_prefetch_handler
+__tx_prefetch_handler:
+ B __tx_prefetch_handler // Prefetch exception handler
+
+ .global __tx_abort_handler
+__tx_abort_handler:
+ B __tx_abort_handler // Abort exception handler
+
+ .global __tx_reserved_handler
+__tx_reserved_handler:
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
+ .type __tx_irq_processing_return,function
+ .global __tx_irq_handler
+__tx_irq_handler:
+
+ /* Jump to context save to save system context. */
+ B _tx_thread_context_save
+__tx_irq_processing_return:
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_start
+#endif
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns with processing in IRQ mode with interrupts disabled.  */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_end
+#endif
+
+ /* Jump to context restore to restore system context. */
+ B _tx_thread_context_restore
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns with processing in IRQ mode with interrupts disabled.  */
+
+ /* Jump to context restore to restore system context. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ .global __tx_fiq_handler
+ .global __tx_fiq_processing_return
+__tx_fiq_handler:
+
+ /* Jump to fiq context save to save system context. */
+ B _tx_thread_fiq_context_save
+__tx_fiq_processing_return:
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_start
+#endif
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_end
+#endif
+
+ /* Jump to fiq context restore to restore system context. */
+ B _tx_thread_fiq_context_restore
+
+
+#else
+ .global __tx_fiq_handler
+__tx_fiq_handler:
+ B __tx_fiq_handler // FIQ interrupt handler
+#endif
+
+
+BUILD_OPTIONS:
+ .word _tx_build_options // Reference to bring in
+VERSION_ID:
+ .word _tx_version_id // Reference to bring in
+
+
diff --git a/ports/cortex_a17/ac6/example_build/tx/.cproject b/ports/cortex_a17/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..93fc2931
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/tx/.cproject
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a17/ac6/example_build/tx/.project b/ports/cortex_a17/ac6/example_build/tx/.project
new file mode 100644
index 00000000..863ca5cb
--- /dev/null
+++ b/ports/cortex_a17/ac6/example_build/tx/.project
@@ -0,0 +1,48 @@
+
+
+ tx
+
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+ full,incremental,
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
+
+ inc_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/inc
+
+
+ inc_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/inc
+
+
+ src_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/src
+
+
+ src_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/src
+
+
+
diff --git a/ports/cortex_a17/ac6/inc/tx_port.h b/ports/cortex_a17/ac6/inc/tx_port.h
new file mode 100644
index 00000000..19463de1
--- /dev/null
+++ b/ports/cortex_a17/ac6/inc/tx_port.h
@@ -0,0 +1,328 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned long ULONG;
+typedef short SHORT;
+typedef unsigned short USHORT;
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#else
+#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
+#endif
+#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#define TX_INLINE_INITIALIZATION
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#if __TARGET_ARCH_ARM > 4
+
+#ifndef __thumb__
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
+ asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
+ b = 31 - b;
+#endif
+#endif
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifdef __thumb__
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+
+#else
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save, tx_temp;
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID if ": "=r" (interrupt_save) );
+#else
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID i ": "=r" (interrupt_save) );
+#endif
+
+#define TX_RESTORE asm volatile (" MSR CPSR_c,%0 "::"r" (interrupt_save) );
+
+#endif
+
+
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
+ thread. */
+
+void tx_thread_vfp_enable(void);
+void tx_thread_vfp_disable(void);
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
+
diff --git a/ports/cortex_a17/ac6/src/tx_thread_context_restore.S b/ports/cortex_a17/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..fae7e72d
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,222 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
+#else
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
+#endif
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore,function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
+_tx_skip_irq_vfp_save:
+
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a17/ac6/src/tx_thread_context_save.S b/ports/cortex_a17/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..7ac48c2e
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,172 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save,function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a17/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a17/ac6/src/tx_thread_fiq_context_restore.S
new file mode 100644
index 00000000..006be973
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_fiq_context_restore.S
@@ -0,0 +1,223 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_system_stack_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_restore
+ .type _tx_thread_fiq_context_restore,function
+_tx_thread_fiq_context_restore:
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+ // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
+
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_fiq_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+_tx_skip_fiq_vfp_save:
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
+
diff --git a/ports/cortex_a17/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a17/ac6/src/tx_thread_fiq_context_save.S
new file mode 100644
index 00000000..7db6a4c2
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_fiq_context_save.S
@@ -0,0 +1,178 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_fiq_processing_return
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_save
+ .type _tx_thread_fiq_context_save,function
+_tx_thread_fiq_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ/FIQ interrupts are locked
+ out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
+__tx_thread_fiq_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+
+__tx_thread_fiq_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_end.S
new file mode 100644
index 00000000..b34d881e
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_end.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_end
+ .type _tx_thread_fiq_nesting_end,function
+_tx_thread_fiq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
+
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_start.S
new file mode 100644
index 00000000..c9cd5a06
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_fiq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_start
+ .type _tx_thread_fiq_nesting_start,function
+_tx_thread_fiq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a17/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..63b1609a
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+INT_MASK = 0x03F
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_control
+$_tx_thread_interrupt_control:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // 2-byte pad so the ARM code below is word aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control,function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear I/F interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a17/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..13258808
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,101 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_disable
+$_tx_thread_interrupt_disable:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // 2-byte pad so the ARM code below is word aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable,function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR (old posture, returned in r0)
+
+ /* Mask interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ
+#else
+ CPSID i // Disable IRQ
+#endif
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a17/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..2d582511
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,93 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_restore
+$_tx_thread_interrupt_restore:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // 2-byte pad so the ARM code below is word aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore,function
+_tx_thread_interrupt_restore:
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Restore CPSR interrupt posture from old_posture (r0)
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_end.S
new file mode 100644
index 00000000..ec7e63c6
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_end.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_end
+ .type _tx_thread_irq_nesting_end,function
+_tx_thread_irq_nesting_end:
+ MOV r3,lr // Save ISR return address in r3
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_start.S
new file mode 100644
index 00000000..c69976ed
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_irq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_start
+ .type _tx_thread_irq_nesting_start,function
+_tx_thread_irq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enable IRQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_schedule.S b/ports/cortex_a17/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..8330e9df
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,230 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_execute_ptr
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_schedule
+ .type $_tx_thread_schedule,function
+$_tx_thread_schedule:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP // 2-byte pad so the ARM code below is word aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule,function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSIE if // Enable IRQ and FIQ interrupts
+#else
+ CPSIE i // Enable IRQ interrupts
+#endif
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+__tx_thread_schedule_loop:
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save thread pointer (r0) across the call
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore thread pointer
+#endif
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
+ BEQ _tx_solicited_return // Yes, solicited (synchronous) return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_interrupt_vfp_restore:
+#endif
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
+
+_tx_solicited_return:
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_solicited_vfp_restore:
+#endif
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+
+ .global tx_thread_vfp_enable
+ .type tx_thread_vfp_enable,function
+tx_thread_vfp_enable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_enable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+ .global tx_thread_vfp_disable
+ .type tx_thread_vfp_disable,function
+tx_thread_vfp_disable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_disable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_stack_build.S b/ports/cortex_a17/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..f413e673
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,164 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+ .arm
+
+SVC_MODE = 0x13 // SVC mode
+#ifdef TX_ENABLE_FIQ_SUPPORT
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
+#else
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
+#endif
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_thread_stack_build
+ .type $_tx_thread_stack_build,function
+$_tx_thread_stack_build:
+ BX pc // Switch to 32-bit (ARM) mode
+ NOP // Padding so the ARM code below starts word-aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build,function
+_tx_thread_stack_build:
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area from the thread control block
+ BIC r2, r2, #7 // Ensure 8-byte alignment (AAPCS stack requirement)
+ SUB r2, r2, #76 // Allocate space for the 19-word stack frame above
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type (1 = interrupt frame)
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value (all GP regs start at 0)
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address from the thread control block
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Rebuild zero value (r3 was clobbered above)
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc (thread entry function pointer)
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode and interrupt-disable bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/ac6/src/tx_thread_system_return.S b/ports/cortex_a17/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..cb7d62ce
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,162 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_system_return
+ .type $_tx_thread_system_return,function
+$_tx_thread_system_return:
+ .thumb
+ BX pc // Switch to 32-bit (ARM) mode
+ NOP // Padding so the ARM code below starts word-aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return,function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context (callee-saved registers only)
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag (tx_thread_vfp_enable field)
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
+_tx_skip_solicited_vfp_save:
+#endif
+
+ MOV r0, #0 // Build a solicited stack type (0 = solicited frame)
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer in the thread control block
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice in the thread control block
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a17/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a17/ac6/src/tx_thread_vectored_context_save.S
new file mode 100644
index 00000000..d846223f
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_thread_vectored_context_save.S
@@ -0,0 +1,165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called from 16-bit Thumb mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_vectored_context_save
+ .type _tx_thread_vectored_context_save,function
+_tx_thread_vectored_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #32 // Recover saved registers (discard the 8-word minimal
+ MOV pc, lr // context - nothing to preserve) and return to caller
diff --git a/ports/cortex_a17/ac6/src/tx_timer_interrupt.S b/ports/cortex_a17/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..7337ed0c
--- /dev/null
+++ b/ports/cortex_a17/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,231 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+/* Define Assembly language external references... */
+
+ .global _tx_timer_time_slice
+ .global _tx_timer_system_clock
+ .global _tx_timer_current_ptr
+ .global _tx_timer_list_start
+ .global _tx_timer_list_end
+ .global _tx_timer_expired_time_slice
+ .global _tx_timer_expired
+ .global _tx_thread_time_slice
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_timer_interrupt
+ .type $_tx_timer_interrupt,function
+$_tx_timer_interrupt:
+ BX pc // Switch to 32-bit (ARM) mode
+ NOP // Padding so the ARM code below starts word-aligned
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt,function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer pointer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer (next 4-byte list entry)
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/example_build/build_threadx.bat b/ports/cortex_a17/gnu/example_build/build_threadx.bat
new file mode 100644
index 00000000..571df29e
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/build_threadx.bat
@@ -0,0 +1,238 @@
+rem Build script for the ThreadX kernel library (tx.a) targeting ARM
+rem Cortex-A17 with the GNU arm-none-eabi toolchain. Every source file is
+rem compiled separately with debug info (-c -g -mcpu=cortex-a17) and the
+rem resulting objects are collected into the tx.a archive at the end.
+rem Remove any stale library so the archive below is built from scratch.
+del tx.a
+rem Assemble the port-specific (Cortex-A17) assembly sources.
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 tx_initialize_low_level.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_stack_build.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_schedule.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_system_return.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_context_save.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_context_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_interrupt_control.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_timer_interrupt.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_interrupt_disable.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_interrupt_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_fiq_context_save.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_fiq_nesting_start.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_irq_nesting_start.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_irq_nesting_end.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_fiq_nesting_end.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_fiq_context_restore.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 ../src/tx_thread_vectored_context_save.S
+rem Compile the portable ThreadX C sources; the -I options pick up the
+rem common API headers (common/inc) and the port-specific headers (../inc).
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_block_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_pool_search.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_byte_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_event_flags_set_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_high_level.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_kernel_enter.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_initialize_kernel_setup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_mutex_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_flush.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_front_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_receive.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_queue_send_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_ceiling_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_cleanup.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_semaphore_put_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_entry_exit_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_identify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_preemption_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_relinquish.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_reset.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_shell_entry.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_sleep.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_analyze.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_error_handler.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_stack_error_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_preempt_check.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_system_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_terminate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_time_slice.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_time_slice_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_timeout.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_thread_wait_abort.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_time_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_time_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_expiration_process.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_performance_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_performance_system_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_system_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_system_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_timer_thread_entry.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_enable.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_disable.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_initialize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_interrupt_control.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_isr_enter_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_isr_exit_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_object_register.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_object_unregister.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_user_event_insert.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_buffer_full_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_event_filter.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/tx_trace_event_unfilter.c
+rem Compile the error-checking (txe_) API shell sources.
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_block_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_allocate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_pool_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_byte_release.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_set.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_event_flags_set_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_mutex_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_flush.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_front_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_receive.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_send.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_queue_send_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_ceiling_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_prioritize.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_put.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_semaphore_put_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_entry_exit_notify.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_info_get.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_preemption_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_priority_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_relinquish.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_reset.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_resume.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_suspend.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_terminate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_time_slice_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_thread_wait_abort.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_activate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_change.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_create.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_deactivate.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_delete.c
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc ../../../../common/src/txe_timer_info_get.c
+rem Insert all objects into tx.a (-r replaces existing members or appends
+rem new ones, so reruns of this script keep the archive consistent).
+arm-none-eabi-ar -r tx.a tx_thread_stack_build.o tx_thread_schedule.o tx_thread_system_return.o tx_thread_context_save.o tx_thread_context_restore.o tx_timer_interrupt.o tx_thread_interrupt_control.o
+arm-none-eabi-ar -r tx.a tx_thread_interrupt_disable.o tx_thread_interrupt_restore.o tx_thread_fiq_context_save.o tx_thread_fiq_nesting_start.o tx_thread_irq_nesting_start.o tx_thread_irq_nesting_end.o
+arm-none-eabi-ar -r tx.a tx_thread_fiq_nesting_end.o tx_thread_fiq_context_restore.o tx_thread_vectored_context_save.o tx_initialize_low_level.o
+arm-none-eabi-ar -r tx.a tx_block_allocate.o tx_block_pool_cleanup.o tx_block_pool_create.o tx_block_pool_delete.o tx_block_pool_info_get.o
+arm-none-eabi-ar -r tx.a tx_block_pool_initialize.o tx_block_pool_performance_info_get.o tx_block_pool_performance_system_info_get.o tx_block_pool_prioritize.o
+arm-none-eabi-ar -r tx.a tx_block_release.o tx_byte_allocate.o tx_byte_pool_cleanup.o tx_byte_pool_create.o tx_byte_pool_delete.o tx_byte_pool_info_get.o
+arm-none-eabi-ar -r tx.a tx_byte_pool_initialize.o tx_byte_pool_performance_info_get.o tx_byte_pool_performance_system_info_get.o tx_byte_pool_prioritize.o
+arm-none-eabi-ar -r tx.a tx_byte_pool_search.o tx_byte_release.o tx_event_flags_cleanup.o tx_event_flags_create.o tx_event_flags_delete.o tx_event_flags_get.o
+arm-none-eabi-ar -r tx.a tx_event_flags_info_get.o tx_event_flags_initialize.o tx_event_flags_performance_info_get.o tx_event_flags_performance_system_info_get.o
+arm-none-eabi-ar -r tx.a tx_event_flags_set.o tx_event_flags_set_notify.o tx_initialize_high_level.o tx_initialize_kernel_enter.o tx_initialize_kernel_setup.o
+arm-none-eabi-ar -r tx.a tx_mutex_cleanup.o tx_mutex_create.o tx_mutex_delete.o tx_mutex_get.o tx_mutex_info_get.o tx_mutex_initialize.o tx_mutex_performance_info_get.o
+arm-none-eabi-ar -r tx.a tx_mutex_performance_system_info_get.o tx_mutex_prioritize.o tx_mutex_priority_change.o tx_mutex_put.o tx_queue_cleanup.o tx_queue_create.o
+arm-none-eabi-ar -r tx.a tx_queue_delete.o tx_queue_flush.o tx_queue_front_send.o tx_queue_info_get.o tx_queue_initialize.o tx_queue_performance_info_get.o
+arm-none-eabi-ar -r tx.a tx_queue_performance_system_info_get.o tx_queue_prioritize.o tx_queue_receive.o tx_queue_send.o tx_queue_send_notify.o tx_semaphore_ceiling_put.o
+arm-none-eabi-ar -r tx.a tx_semaphore_cleanup.o tx_semaphore_create.o tx_semaphore_delete.o tx_semaphore_get.o tx_semaphore_info_get.o tx_semaphore_initialize.o
+arm-none-eabi-ar -r tx.a tx_semaphore_performance_info_get.o tx_semaphore_performance_system_info_get.o tx_semaphore_prioritize.o tx_semaphore_put.o tx_semaphore_put_notify.o
+arm-none-eabi-ar -r tx.a tx_thread_create.o tx_thread_delete.o tx_thread_entry_exit_notify.o tx_thread_identify.o tx_thread_info_get.o tx_thread_initialize.o
+arm-none-eabi-ar -r tx.a tx_thread_performance_info_get.o tx_thread_performance_system_info_get.o tx_thread_preemption_change.o tx_thread_priority_change.o tx_thread_relinquish.o
+arm-none-eabi-ar -r tx.a tx_thread_reset.o tx_thread_resume.o tx_thread_shell_entry.o tx_thread_sleep.o tx_thread_stack_analyze.o tx_thread_stack_error_handler.o
+arm-none-eabi-ar -r tx.a tx_thread_stack_error_notify.o tx_thread_suspend.o tx_thread_system_preempt_check.o tx_thread_system_resume.o tx_thread_system_suspend.o
+arm-none-eabi-ar -r tx.a tx_thread_terminate.o tx_thread_time_slice.o tx_thread_time_slice_change.o tx_thread_timeout.o tx_thread_wait_abort.o tx_time_get.o
+arm-none-eabi-ar -r tx.a tx_time_set.o tx_timer_activate.o tx_timer_change.o tx_timer_create.o tx_timer_deactivate.o tx_timer_delete.o tx_timer_expiration_process.o
+arm-none-eabi-ar -r tx.a tx_timer_info_get.o tx_timer_initialize.o tx_timer_performance_info_get.o tx_timer_performance_system_info_get.o tx_timer_system_activate.o
+arm-none-eabi-ar -r tx.a tx_timer_system_deactivate.o tx_timer_thread_entry.o tx_trace_enable.o tx_trace_disable.o tx_trace_initialize.o tx_trace_interrupt_control.o
+arm-none-eabi-ar -r tx.a tx_trace_isr_enter_insert.o tx_trace_isr_exit_insert.o tx_trace_object_register.o tx_trace_object_unregister.o tx_trace_user_event_insert.o
+arm-none-eabi-ar -r tx.a tx_trace_buffer_full_notify.o tx_trace_event_filter.o tx_trace_event_unfilter.o
+arm-none-eabi-ar -r tx.a txe_block_allocate.o txe_block_pool_create.o txe_block_pool_delete.o txe_block_pool_info_get.o txe_block_pool_prioritize.o txe_block_release.o
+arm-none-eabi-ar -r tx.a txe_byte_allocate.o txe_byte_pool_create.o txe_byte_pool_delete.o txe_byte_pool_info_get.o txe_byte_pool_prioritize.o txe_byte_release.o
+arm-none-eabi-ar -r tx.a txe_event_flags_create.o txe_event_flags_delete.o txe_event_flags_get.o txe_event_flags_info_get.o txe_event_flags_set.o
+arm-none-eabi-ar -r tx.a txe_event_flags_set_notify.o txe_mutex_create.o txe_mutex_delete.o txe_mutex_get.o txe_mutex_info_get.o txe_mutex_prioritize.o
+arm-none-eabi-ar -r tx.a txe_mutex_put.o txe_queue_create.o txe_queue_delete.o txe_queue_flush.o txe_queue_front_send.o txe_queue_info_get.o txe_queue_prioritize.o
+arm-none-eabi-ar -r tx.a txe_queue_receive.o txe_queue_send.o txe_queue_send_notify.o txe_semaphore_ceiling_put.o txe_semaphore_create.o txe_semaphore_delete.o
+arm-none-eabi-ar -r tx.a txe_semaphore_get.o txe_semaphore_info_get.o txe_semaphore_prioritize.o txe_semaphore_put.o txe_semaphore_put_notify.o txe_thread_create.o
+arm-none-eabi-ar -r tx.a txe_thread_delete.o txe_thread_entry_exit_notify.o txe_thread_info_get.o txe_thread_preemption_change.o txe_thread_priority_change.o
+arm-none-eabi-ar -r tx.a txe_thread_relinquish.o txe_thread_reset.o txe_thread_resume.o txe_thread_suspend.o txe_thread_terminate.o txe_thread_time_slice_change.o
+arm-none-eabi-ar -r tx.a txe_thread_wait_abort.o txe_timer_activate.o txe_timer_change.o txe_timer_create.o txe_timer_deactivate.o txe_timer_delete.o txe_timer_info_get.o
diff --git a/ports/cortex_a17/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a17/gnu/example_build/build_threadx_sample.bat
new file mode 100644
index 00000000..ce088835
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/build_threadx_sample.bat
@@ -0,0 +1,6 @@
+rem Build the ThreadX sample application for ARM Cortex-A17 (GNU toolchain):
+rem assemble the startup/low-level files, compile the demo, and link against
+rem the previously built kernel library (tx.a) using the sample linker script.
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 reset.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 crt0.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 tx_initialize_low_level.S
+arm-none-eabi-gcc -c -g -mcpu=cortex-a17 -I../../../../common/inc -I../inc sample_threadx.c
+rem NOTE(review): reset.o and crt0.o are assembled above but do not appear on
+rem the link line below -- confirm whether the linker script / gcc specs are
+rem expected to provide startup code, or whether they should be linked in.
+arm-none-eabi-gcc -g -mcpu=cortex-a17 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
+
diff --git a/ports/cortex_a17/gnu/example_build/crt0.S b/ports/cortex_a17/gnu/example_build/crt0.S
new file mode 100644
index 00000000..56b6c958
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/crt0.S
@@ -0,0 +1,90 @@
+
+/* .text is used instead of .section .text so it works with arm-aout too. */
+ .text
+ .code 32
+ .align 0
+
+ .global _mainCRTStartup
+ .global _start
+ .global start
+start:
+_start:
+_mainCRTStartup:
+
+/* Start by setting up a stack */
+ /* Set up the stack pointer to a fixed value */
+ ldr r3, .LC0
+ mov sp, r3
+ /* Setup a default stack-limit in case the code has been
+ compiled with "-mapcs-stack-check". Hard-wiring this value
+ is not ideal, since there is currently no support for
+ checking that the heap and stack have not collided, or that
+ this default 64k is enough for the program being executed.
+ However, it ensures that this simple crt0 world will not
+ immediately cause an overflow event: */
+ sub sl, sp, #64 << 10 /* Still assumes 256bytes below sl */
+ mov a2, #0 /* Second arg: fill value */
+ mov fp, a2 /* Null frame pointer */
+ mov r7, a2 /* Null frame pointer for Thumb */
+
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
+ bl memset
+ mov r0, #0 /* no arguments */
+ mov r1, #0 /* no argv either */
+#ifdef __USES_INITFINI__
+ /* Some arm/elf targets use the .init and .fini sections
+ to create constructors and destructors, and for these
+ targets we need to call the _init function and arrange
+ for _fini to be called at program exit. */
+ mov r4, r0
+ mov r5, r1
+/* ldr r0, .Lfini */
+ bl atexit
+/* bl init */
+ mov r0, r4
+ mov r1, r5
+#endif
+ bl main
+
+ bl exit /* Should not return. */
+
+
+ /* For Thumb, constants must be after the code since only
+ positive offsets are supported for PC relative addresses. */
+
+ .align 0
+.LC0:
+.LC1:
+ .word __bss_start__
+.LC2:
+ .word __bss_end__
+/*
+#ifdef __USES_INITFINI__
+.Lfini:
+ .word _fini
+#endif */
+ /* Return ... */
+#ifdef __APCS_26__
+ movs pc, lr
+#else
+#ifdef __THUMB_INTERWORK
+ bx lr
+#else
+ mov pc, lr
+#endif
+#endif
+
+
+/* Workspace for Angel calls. */
+ .data
+/* Data returned by monitor SWI. */
+.global __stack_base__
+HeapBase: .word 0
+HeapLimit: .word 0
+__stack_base__: .word 0
+StackLimit: .word 0
diff --git a/ports/cortex_a17/gnu/example_build/reset.S b/ports/cortex_a17/gnu/example_build/reset.S
new file mode 100644
index 00000000..597e9d9a
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/reset.S
@@ -0,0 +1,64 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+ .global _start
+ .global __tx_undefined
+ .global __tx_swi_interrupt
+ .global __tx_prefetch_handler
+ .global __tx_abort_handler
+ .global __tx_reserved_handler
+ .global __tx_irq_handler
+ .global __tx_fiq_handler
+
+/* Define the vector area. This should be located or copied to 0. */
+
+ .text
+ .global __vectors
+__vectors:
+
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
+
+STARTUP:
+ .word _start // Reset goes to C startup function
+UNDEFINED:
+ .word __tx_undefined // Undefined handler
+SWI:
+ .word __tx_swi_interrupt // Software interrupt handler
+PREFETCH:
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
+FIQ:
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a17/gnu/example_build/sample_threadx.c b/ports/cortex_a17/gnu/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+       below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+       below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a17/gnu/example_build/sample_threadx.ld b/ports/cortex_a17/gnu/example_build/sample_threadx.ld
new file mode 100644
index 00000000..3dea4e1c
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/sample_threadx.ld
@@ -0,0 +1,239 @@
+OUTPUT_FORMAT("elf32-littlearm", "elf32-bigarm",
+ "elf32-littlearm")
+OUTPUT_ARCH(arm)
+/* ENTRY(_start) */
+/* Do we need any of these for elf?
+ __DYNAMIC = 0; */
+SECTIONS
+{
+ . = 0x00000000;
+
+ .vectors : {reset.o(.text) }
+
+ /* Read-only sections, merged into text segment: */
+ . = 0x00001000;
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.text :
+ {
+ *(.rel.text)
+ *(.rel.text.*)
+ *(.rel.gnu.linkonce.t*)
+ }
+ .rela.text :
+ {
+ *(.rela.text)
+ *(.rela.text.*)
+ *(.rela.gnu.linkonce.t*)
+ }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.rodata :
+ {
+ *(.rel.rodata)
+ *(.rel.rodata.*)
+ *(.rel.gnu.linkonce.r*)
+ }
+ .rela.rodata :
+ {
+ *(.rela.rodata)
+ *(.rela.rodata.*)
+ *(.rela.gnu.linkonce.r*)
+ }
+ .rel.data :
+ {
+ *(.rel.data)
+ *(.rel.data.*)
+ *(.rel.gnu.linkonce.d*)
+ }
+ .rela.data :
+ {
+ *(.rela.data)
+ *(.rela.data.*)
+ *(.rela.gnu.linkonce.d*)
+ }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.sdata :
+ {
+ *(.rel.sdata)
+ *(.rel.sdata.*)
+ *(.rel.gnu.linkonce.s*)
+ }
+ .rela.sdata :
+ {
+ *(.rela.sdata)
+ *(.rela.sdata.*)
+ *(.rela.gnu.linkonce.s*)
+ }
+ .rel.sbss : { *(.rel.sbss) }
+ .rela.sbss : { *(.rela.sbss) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .plt : { *(.plt) }
+ .text :
+ {
+ *(.text)
+ *(.text.*)
+ *(.stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.gnu.linkonce.t*)
+ *(.glue_7t) *(.glue_7)
+ } =0
+ .init :
+ {
+ KEEP (*(.init))
+ } =0
+ _etext = .;
+ PROVIDE (etext = .);
+ .fini :
+ {
+ KEEP (*(.fini))
+ } =0
+ .rodata : { *(.rodata) *(.rodata.*) *(.gnu.linkonce.r*) }
+ .rodata1 : { *(.rodata1) }
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN(256) + (. & (256 - 1));
+ .data :
+ {
+ *(.data)
+ *(.data.*)
+ *(.gnu.linkonce.d*)
+ SORT(CONSTRUCTORS)
+ }
+ .data1 : { *(.data1) }
+ .eh_frame : { KEEP (*(.eh_frame)) }
+ .gcc_except_table : { *(.gcc_except_table) }
+ .ctors :
+ {
+ /* gcc uses crtbegin.o to find the start of
+ the constructors, so we make sure it is
+ first. Because this is a wildcard, it
+ doesn't matter if the user does not
+ actually link against crtbegin.o; the
+ linker won't look for a file to match a
+ wildcard. The wildcard also means that it
+ doesn't matter which directory crtbegin.o
+ is in. */
+ KEEP (*crtbegin.o(.ctors))
+ /* We don't want to include the .ctor section from
+ from the crtend.o file until after the sorted ctors.
+ The .ctor section from the crtend file contains the
+ end of ctors marker and it must be last */
+ KEEP (*(EXCLUDE_FILE (*crtend.o ) .ctors))
+ KEEP (*(SORT(.ctors.*)))
+ KEEP (*(.ctors))
+ }
+ .dtors :
+ {
+ KEEP (*crtbegin.o(.dtors))
+ KEEP (*(EXCLUDE_FILE (*crtend.o ) .dtors))
+ KEEP (*(SORT(.dtors.*)))
+ KEEP (*(.dtors))
+ }
+ .jcr : { KEEP (*(.jcr)) }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata :
+ {
+ *(.sdata)
+ *(.sdata.*)
+ *(.gnu.linkonce.s.*)
+ }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ __bss_start__ = .;
+ .sbss :
+ {
+ *(.dynsbss)
+ *(.sbss)
+ *(.sbss.*)
+ *(.scommon)
+ }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ /* Align here to ensure that the .bss section occupies space up to
+ _end. Align after .bss to ensure correct alignment even if the
+ .bss section disappears because there are no input sections. */
+ . = ALIGN(32 / 8);
+ }
+ . = ALIGN(32 / 8);
+
+ _bss_end__ = . ; __bss_end__ = . ;
+ PROVIDE (end = .);
+
+ .stack :
+ {
+
+ _stack_bottom = ABSOLUTE(.) ;
+
+ /* Allocate room for stack. This must be big enough for the IRQ, FIQ, and
+ SYS stack if nested interrupts are enabled. */
+ . = ALIGN(8) ;
+ . += 4096 ;
+ _sp = . - 16 ;
+ _stack_top = ABSOLUTE(.) ;
+ }
+
+ _end = .; __end__ = . ;
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+
+ /* These must appear regardless of . */
+}
diff --git a/ports/cortex_a17/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a17/gnu/example_build/tx_initialize_low_level.S
new file mode 100644
index 00000000..7de5d3ce
--- /dev/null
+++ b/ports/cortex_a17/gnu/example_build/tx_initialize_low_level.S
@@ -0,0 +1,305 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
+ .global _tx_thread_system_stack_ptr
+ .global _tx_initialize_unused_memory
+ .global _tx_thread_context_save
+ .global _tx_thread_context_restore
+ .global _tx_timer_interrupt
+ .global _end
+ .global _sp
+ .global _stack_bottom
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_initialize_low_level
+ .type $_tx_initialize_low_level,function
+$_tx_initialize_low_level:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level,function
+_tx_initialize_low_level:
+
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+#endif
+
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+/* Define shells for each of the interrupt vectors. */
+
+ .global __tx_undefined
+__tx_undefined:
+ B __tx_undefined // Undefined handler
+
+ .global __tx_swi_interrupt
+__tx_swi_interrupt:
+ B __tx_swi_interrupt // Software interrupt handler
+
+ .global __tx_prefetch_handler
+__tx_prefetch_handler:
+ B __tx_prefetch_handler // Prefetch exception handler
+
+ .global __tx_abort_handler
+__tx_abort_handler:
+ B __tx_abort_handler // Abort exception handler
+
+ .global __tx_reserved_handler
+__tx_reserved_handler:
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_handler
+ .global __tx_irq_processing_return
+__tx_irq_handler:
+
+ /* Jump to context save to save system context. */
+ B _tx_thread_context_save
+__tx_irq_processing_return:
+//
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_start
+#endif
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns with processing in IRQ mode and interrupts disabled. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_end
+#endif
+
+ /* Jump to context restore to restore system context. */
+ B _tx_thread_context_restore
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns with processing in IRQ mode and interrupts disabled. */
+
+
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ .global __tx_fiq_handler
+ .global __tx_fiq_processing_return
+__tx_fiq_handler:
+
+ /* Jump to fiq context save to save system context. */
+ B _tx_thread_fiq_context_save
+__tx_fiq_processing_return:
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_start
+#endif
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_end
+#endif
+
+ /* Jump to fiq context restore to restore system context. */
+ B _tx_thread_fiq_context_restore
+
+
+#else
+ .global __tx_fiq_handler
+__tx_fiq_handler:
+ B __tx_fiq_handler // FIQ interrupt handler
+#endif
+
+
+BUILD_OPTIONS:
+ .word _tx_build_options // Reference to bring in
+VERSION_ID:
+ .word _tx_version_id // Reference to bring in
+
+
+
diff --git a/ports/cortex_a17/gnu/inc/tx_port.h b/ports/cortex_a17/gnu/inc/tx_port.h
new file mode 100644
index 00000000..19463de1
--- /dev/null
+++ b/ports/cortex_a17/gnu/inc/tx_port.h
@@ -0,0 +1,328 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned long ULONG;
+typedef short SHORT;
+typedef unsigned short USHORT;
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#else
+#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
+#endif
+#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#define TX_INLINE_INITIALIZATION
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#if __TARGET_ARCH_ARM > 4
+
+#ifndef __thumb__
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
+ asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
+ b = 31 - b;
+#endif
+#endif
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifdef __thumb__
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+
+#else
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save, tx_temp;
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID if ": "=r" (interrupt_save) );
+#else
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID i ": "=r" (interrupt_save) );
+#endif
+
+#define TX_RESTORE asm volatile (" MSR CPSR_c,%0 "::"r" (interrupt_save) );
+
+#endif
+
+
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
+ thread. */
+
+void tx_thread_vfp_enable(void);
+void tx_thread_vfp_disable(void);
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
+
diff --git a/ports/cortex_a17/gnu/src/tx_thread_context_restore.S b/ports/cortex_a17/gnu/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..fae7e72d
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_context_restore.S
@@ -0,0 +1,222 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
+#else
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
+#endif
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore,function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
+_tx_skip_irq_vfp_save:
+
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a17/gnu/src/tx_thread_context_save.S b/ports/cortex_a17/gnu/src/tx_thread_context_save.S
new file mode 100644
index 00000000..7ac48c2e
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_context_save.S
@@ -0,0 +1,172 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save,function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a17/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a17/gnu/src/tx_thread_fiq_context_restore.S
new file mode 100644
index 00000000..006be973
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_fiq_context_restore.S
@@ -0,0 +1,223 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_system_stack_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_restore
+ .type _tx_thread_fiq_context_restore,function
+_tx_thread_fiq_context_restore:
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
+
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_fiq_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+    LDMIA   sp!, {r0, lr}                   // Recover SPSR and point of interrupt
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+    BEQ     _tx_skip_fiq_vfp_save           // No, skip VFP FIQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+_tx_skip_fiq_vfp_save:
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
+
diff --git a/ports/cortex_a17/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a17/gnu/src/tx_thread_fiq_context_save.S
new file mode 100644
index 00000000..7db6a4c2
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_fiq_context_save.S
@@ -0,0 +1,178 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_fiq_processing_return
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_save
+ .type _tx_thread_fiq_context_save,function
+_tx_thread_fiq_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
+__tx_thread_fiq_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+
+__tx_thread_fiq_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_end.S
new file mode 100644
index 00000000..b34d881e
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_end.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_end
+ .type _tx_thread_fiq_nesting_end,function
+_tx_thread_fiq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
+
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_start.S
new file mode 100644
index 00000000..c9cd5a06
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_fiq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_start
+ .type _tx_thread_fiq_nesting_start,function
+_tx_thread_fiq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a17/gnu/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..63b1609a
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_interrupt_control.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+INT_MASK = 0x03F
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_control
+$_tx_thread_interrupt_control:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control,function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a17/gnu/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..13258808
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,101 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_disable
+$_tx_thread_interrupt_disable:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable,function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ
+#else
+ CPSID i // Disable IRQ
+#endif
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a17/gnu/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..2d582511
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,93 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_restore
+$_tx_thread_interrupt_restore:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore,function
+_tx_thread_interrupt_restore:
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_end.S
new file mode 100644
index 00000000..ec7e63c6
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_end.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_end
+ .type _tx_thread_irq_nesting_end,function
+_tx_thread_irq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_start.S
new file mode 100644
index 00000000..c69976ed
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_irq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_start
+ .type _tx_thread_irq_nesting_start,function
+_tx_thread_irq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enable IRQ interrupts
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_schedule.S b/ports/cortex_a17/gnu/src/tx_thread_schedule.S
new file mode 100644
index 00000000..8330e9df
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_schedule.S
@@ -0,0 +1,230 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_execute_ptr
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_schedule
+ .type $_tx_thread_schedule,function
+$_tx_thread_schedule:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule,function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSIE if // Enable IRQ and FIQ interrupts
+#else
+ CPSIE i // Enable IRQ interrupts
+#endif
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+__tx_thread_schedule_loop:
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
+#endif
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
+ BEQ _tx_solicited_return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_interrupt_vfp_restore:
+#endif
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
+
+_tx_solicited_return:
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_solicited_vfp_restore:
+#endif
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+
+ .global tx_thread_vfp_enable
+ .type tx_thread_vfp_enable,function
+tx_thread_vfp_enable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_enable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+ .global tx_thread_vfp_disable
+ .type tx_thread_vfp_disable,function
+tx_thread_vfp_disable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_disable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_stack_build.S b/ports/cortex_a17/gnu/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..f413e673
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_stack_build.S
@@ -0,0 +1,164 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+ .arm
+
+SVC_MODE = 0x13 // SVC mode
+#ifdef TX_ENABLE_FIQ_SUPPORT
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
+#else
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
+#endif
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_thread_stack_build
+ .type $_tx_thread_stack_build,function
+$_tx_thread_stack_build:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build,function
+_tx_thread_stack_build:
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+         pc (r15)    Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a17/gnu/src/tx_thread_system_return.S b/ports/cortex_a17/gnu/src/tx_thread_system_return.S
new file mode 100644
index 00000000..cb7d62ce
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_system_return.S
@@ -0,0 +1,162 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_system_return
+ .type $_tx_thread_system_return,function
+$_tx_thread_system_return:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/*    are going to get clobbered by a function call anyway.               */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return,function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
+_tx_skip_solicited_vfp_save:
+#endif
+
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a17/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a17/gnu/src/tx_thread_vectored_context_save.S
new file mode 100644
index 00000000..d846223f
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_thread_vectored_context_save.S
@@ -0,0 +1,165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_vectored_context_save
+ .type _tx_thread_vectored_context_save,function
+_tx_thread_vectored_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a17/gnu/src/tx_timer_interrupt.S b/ports/cortex_a17/gnu/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..7337ed0c
--- /dev/null
+++ b/ports/cortex_a17/gnu/src/tx_timer_interrupt.S
@@ -0,0 +1,231 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+/* Define Assembly language external references... */
+
+ .global _tx_timer_time_slice
+ .global _tx_timer_system_clock
+ .global _tx_timer_current_ptr
+ .global _tx_timer_list_start
+ .global _tx_timer_list_end
+ .global _tx_timer_expired_time_slice
+ .global _tx_timer_expired
+ .global _tx_thread_time_slice
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_timer_interrupt
+ .type $_tx_timer_interrupt,function
+$_tx_timer_interrupt:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt,function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                            //   the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx.c b/ports/cortex_a5/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these function compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+ below shows, these function compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a5/ac6/example_build/sample_threadx/.cproject
new file mode 100644
index 00000000..27463deb
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/.cproject
@@ -0,0 +1,176 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/.project b/ports/cortex_a5/ac6/example_build/sample_threadx/.project
new file mode 100644
index 00000000..ed4c0885
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/.project
@@ -0,0 +1,27 @@
+
+
+ sample_threadx
+
+
+ tx
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+ full,incremental,
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+    /* Enter the ThreadX kernel.  This call transfers control to the
+       scheduler (via tx_application_define below) and does not return,
+       which is why no return statement follows. */
+    tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+/* Create all demo objects.  Called once by ThreadX during tx_kernel_enter().
+   first_unused_memory is the first free RAM address supplied by the port's
+   low-level initialization; it seeds the byte pool from which every thread
+   stack and the queue/block-pool storage below are carved.
+   NOTE(review): return values of the tx_byte_allocate/tx_*_create calls are
+   intentionally ignored here -- acceptable for a demo, but production code
+   should check them. */
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+    /* Create a byte memory pool from which to allocate the thread stacks. */
+    tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+    /* Put system definition stuff in here, e.g. thread creates and other assorted
+       create information. */
+
+    /* Allocate the stack for thread 0. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    /* Create the main thread (priority 1, no time slice). */
+    tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+            pointer, DEMO_STACK_SIZE,
+            1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+    /* Allocate the stack for thread 1. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    /* Create threads 1 and 2.  These threads pass information through a ThreadX
+       message queue.  It is also interesting to note that these threads have a time
+       slice (4 ticks at priority 16). */
+    tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+            pointer, DEMO_STACK_SIZE,
+            16, 16, 4, TX_AUTO_START);
+
+    /* Allocate the stack for thread 2. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+            pointer, DEMO_STACK_SIZE,
+            16, 16, 4, TX_AUTO_START);
+
+    /* Allocate the stack for thread 3. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    /* Create threads 3 and 4.  These threads compete for a ThreadX counting semaphore.
+       An interesting thing here is that both threads share the same instruction area
+       (thread_3_and_4_entry, dispatched on the thread input value). */
+    tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+            pointer, DEMO_STACK_SIZE,
+            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+    /* Allocate the stack for thread 4. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+            pointer, DEMO_STACK_SIZE,
+            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+    /* Allocate the stack for thread 5. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    /* Create thread 5.  This thread simply pends on an event flag which will be set
+       by thread_0. */
+    tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+            pointer, DEMO_STACK_SIZE,
+            4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+    /* Allocate the stack for thread 6. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    /* Create threads 6 and 7.  These threads compete for a ThreadX mutex. */
+    tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+            pointer, DEMO_STACK_SIZE,
+            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+    /* Allocate the stack for thread 7. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+    tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+            pointer, DEMO_STACK_SIZE,
+            8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+    /* Allocate the message queue (DEMO_QUEUE_SIZE one-word messages). */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+    /* Create the message queue shared by threads 1 and 2. */
+    tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+    /* Create the semaphore used by threads 3 and 4 (initial count 1). */
+    tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+    /* Create the event flags group used by threads 1 and 5. */
+    tx_event_flags_create(&event_flags_0, "event flags 0");
+
+    /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+    tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+    /* Allocate the memory for a small block pool. */
+    tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+    /* Create a block memory pool to allocate a message buffer from. */
+    tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+    /* Allocate a block and release the block memory (exercise the pool once). */
+    tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+    /* Release the block back to the pool. */
+    tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* Periodic thread: sleep 10 ticks, then signal thread 5 by setting
+       event flag 0.  Runs forever unless the flag-set service fails. */
+    for (;;)
+    {
+
+        /* Record one more pass through the loop. */
+        thread_0_counter++;
+
+        /* Delay for 10 timer ticks. */
+        tx_thread_sleep(10);
+
+        /* OR in flag bit 0 to wake up thread 5. */
+        status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+        /* Stop on any service failure. */
+        if (status != TX_SUCCESS)
+        {
+            break;
+        }
+    }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* Producer side of the queue demo: stream consecutive sequence
+       numbers into queue 0 for thread 2 to consume. */
+    for (;;)
+    {
+
+        /* Account for this iteration. */
+        thread_1_counter++;
+
+        /* Enqueue the current sequence number, suspending while full. */
+        status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+        if (status != TX_SUCCESS)
+        {
+            break;
+        }
+
+        /* Advance the sequence number only after a successful send. */
+        thread_1_messages_sent++;
+    }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG received_message;
+
+    /* Consumer side of the queue demo: drain the messages produced by
+       thread 1 and verify they arrive in sequence. */
+    for (;;)
+    {
+
+        /* Account for this iteration. */
+        thread_2_counter++;
+
+        /* Dequeue the next message, suspending while the queue is empty. */
+        status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+        /* The payload must equal the running receive count; bail otherwise. */
+        if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+        {
+            break;
+        }
+
+        /* Message validated -- advance the expected sequence number. */
+        thread_2_messages_received++;
+    }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* Shared body for threads 3 and 4.  Both instances contend for
+       semaphore_0, distinguished only by their thread input value. */
+    for (;;)
+    {
+
+        /* Bump the counter of the running instance. */
+        if (thread_input == 3)
+        {
+            thread_3_counter++;
+        }
+        else
+        {
+            thread_4_counter++;
+        }
+
+        /* Acquire the semaphore, suspending until it is available. */
+        status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+        if (status != TX_SUCCESS)
+            break;
+
+        /* Keep the semaphore across a 2-tick sleep. */
+        tx_thread_sleep(2);
+
+        /* Give the semaphore back for the peer thread. */
+        status = tx_semaphore_put(&semaphore_0);
+        if (status != TX_SUCCESS)
+            break;
+    }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+ULONG actual_flags;
+UINT status;
+
+
+    /* Wait-side of the event flag demo: pend on flag 0, which thread 0
+       sets every 10 ticks.  Only a service error ends the loop. */
+    for (;;)
+    {
+
+        /* Count this wakeup cycle. */
+        thread_5_counter++;
+
+        /* Suspend until bit 0 is set; TX_OR_CLEAR consumes the flag. */
+        status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+                                                &actual_flags, TX_WAIT_FOREVER);
+
+        /* Exit on error or if anything other than bit 0 was returned. */
+        if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+        {
+            break;
+        }
+    }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* Shared body for threads 6 and 7; both compete for mutex_0.  The
+       get/get/sleep/put/put sequence demonstrates recursive ownership:
+       the mutex is only surrendered after the second put. */
+    for (;;)
+    {
+
+        /* Bump the counter of the running instance. */
+        if (thread_input == 6)
+        {
+            thread_6_counter++;
+        }
+        else
+        {
+            thread_7_counter++;
+        }
+
+        /* First acquisition: suspend until the mutex is free. */
+        status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+        if (status != TX_SUCCESS)
+            break;
+
+        /* Second acquisition by the same owner succeeds immediately. */
+        status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+        if (status != TX_SUCCESS)
+            break;
+
+        /* Hold the mutex across a 2-tick sleep. */
+        tx_thread_sleep(2);
+
+        /* First release: ownership count drops to one. */
+        status = tx_mutex_put(&mutex_0);
+        if (status != TX_SUCCESS)
+            break;
+
+        /* Second release: ownership is actually given up here. */
+        status = tx_mutex_put(&mutex_0);
+        if (status != TX_SUCCESS)
+            break;
+    }
+}
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..efb12017
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.scat
new file mode 100644
index 00000000..d23881cd
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -0,0 +1,44 @@
+;*******************************************************
+; Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
+; Use, modification and redistribution of this file is subject to your possession of a
+; valid End User License Agreement for the Arm Product of which these examples are part of
+; and your compliance with all applicable terms and conditions of such licence agreement.
+;*******************************************************
+
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
+
+; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
+
+
+; Single load/execution region: 512MB of SDRAM based at 0x80000000.
+SDRAM 0x80000000 0x20000000
+{
+    VECTORS +0
+    {
+        * (VECTORS, +FIRST)   ; Vector table and other (assembler) startup code
+        * (InRoot$$Sections)  ; All (library) code that must be in a root region
+    }
+
+    RO_CODE +0
+    { * (+RO-CODE) }          ; Application RO code (.text)
+
+    RO_DATA +0
+    { * (+RO-DATA) }          ; Application RO data (.constdata)
+
+    RW_DATA +0
+    { * (+RW) }               ; Application RW data (.data)
+
+    ZI_DATA +0
+    { * (+ZI) }               ; Application ZI data (.bss)
+
+    ; NOTE(review): the fixed-address regions below assume the image
+    ; (code + data above) stays below 0x80040000 -- verify at link time.
+    ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap (256KB)
+    { }
+
+    ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack (64KB)
+    { }
+
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
+
+    TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU (4096 x 4-byte entries)
+    { }
+}
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a5/ac6/example_build/sample_threadx/startup.S
new file mode 100644
index 00000000..670fadb9
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/startup.S
@@ -0,0 +1,397 @@
+//----------------------------------------------------------------
+// ARMv7-A Embedded example - Startup Code
+//
+// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
+// Use, modification and redistribution of this file is subject to your possession of a
+// valid End User License Agreement for the Arm Product of which these examples are part of
+// and your compliance with all applicable terms and conditions of such licence agreement.
+//----------------------------------------------------------------
+
+// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
+
+#define Mode_USR 0x10
+#define Mode_FIQ 0x11
+#define Mode_IRQ 0x12
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define Mode_SYS 0x1F
+
+#define I_Bit 0x80 // When I bit is set, IRQ is disabled
+#define F_Bit 0x40 // When F bit is set, FIQ is disabled
+
+
+ .section VECTORS, "ax"
+ .align 3
+ .cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
+
+//----------------------------------------------------------------
+// Entry point for the Reset handler
+//----------------------------------------------------------------
+
+ .global Vectors
+
+//----------------------------------------------------------------
+// Exception Vector Table
+//----------------------------------------------------------------
+// Note: LDR PC instructions are used here, though branch (B) instructions
+// could also be used, unless the exception handlers are >32MB away.
+
+Vectors:
+ LDR PC, Reset_Addr
+ LDR PC, Undefined_Addr
+ LDR PC, SVC_Addr
+ LDR PC, Prefetch_Addr
+ LDR PC, Abort_Addr
+ LDR PC, Hypervisor_Addr
+ LDR PC, IRQ_Addr
+ LDR PC, FIQ_Addr
+
+
+ .balign 4
+Reset_Addr:
+ .word Reset_Handler
+Undefined_Addr:
+ .word __tx_undefined
+SVC_Addr:
+ .word __tx_swi_interrupt
+Prefetch_Addr:
+ .word __tx_prefetch_handler
+Abort_Addr:
+ .word __tx_abort_handler
+Hypervisor_Addr:
+ .word __tx_reserved_handler
+IRQ_Addr:
+ .word __tx_irq_handler
+FIQ_Addr:
+ .word __tx_fiq_handler
+
+
+//----------------------------------------------------------------
+// Exception Handlers
+//----------------------------------------------------------------
+
+Undefined_Handler:
+ B Undefined_Handler
+SVC_Handler:
+ B SVC_Handler
+Prefetch_Handler:
+ B Prefetch_Handler
+Abort_Handler:
+ B Abort_Handler
+Hypervisor_Handler:
+ B Hypervisor_Handler
+IRQ_Handler:
+ B IRQ_Handler
+FIQ_Handler:
+ B FIQ_Handler
+
+
+//----------------------------------------------------------------
+// Reset Handler
+//----------------------------------------------------------------
+// Cold/warm reset entry: brings the core from an unknown state to a
+// flat-mapped, MMU-enabled environment and branches to the C library
+// entry point __main (which performs scatter-loading and calls main).
+Reset_Handler:
+
+//----------------------------------------------------------------
+// Disable caches and MMU in case they were left enabled from an earlier run
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+    MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+    BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+    BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+    BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
+    BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+    MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+    ISB
+
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
+// after the MMU has been enabled and scatterloading has been performed.
+
+//----------------------------------------------------------------
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
+//----------------------------------------------------------------
+    MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
+    ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
+    MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
+    ISB
+
+//----------------------------------------------------------------
+// Invalidate Data and Instruction TLBs and branch predictor
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+    MOV r0,#0
+    MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
+    MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+
+//----------------------------------------------------------------
+// Initialize Supervisor Mode Stack
+// Note stack must be 8 byte aligned.
+// (Stack region is defined by ARM_LIB_STACK in the scatter file.)
+//----------------------------------------------------------------
+
+    LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
+
+//----------------------------------------------------------------
+// Disable loop-buffer to fix errata on A15 r0p0
+// (Harmless on other cores: the block is skipped unless MIDR reports
+// a Cortex-A15 with variant/revision r0p0.)
+//----------------------------------------------------------------
+    MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+    MOV r1, r0, lsr #4 // Extract Primary Part Number
+    LDR r2, =0xFFF
+    AND r1, r1, r2
+    LDR r2, =0xC0F
+    CMP r1, r2 // Is this an A15?
+    BNE notA15r0p0 // Jump if not A15
+    AND r5, r0, #0x00f00000 // Variant
+    AND r6, r0, #0x0000000f // Revision
+    ORRS r6, r6, r5 // Combine variant and revision
+    BNE notA15r0p0 // Jump if not r0p0
+    MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+    ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+    MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+    ISB
+notA15r0p0:
+
+//----------------------------------------------------------------
+// Set Vector Base Address Register (VBAR) to point to this application's vector table
+//----------------------------------------------------------------
+
+    LDR r0, =Vectors
+    MCR p15, 0, r0, c12, c0, 0
+
+//----------------------------------------------------------------
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
+// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
+//----------------------------------------------------------------
+
+    // Invalidate L1 Instruction Cache
+
+    MRC p15, 1, r0, c0, c0, 1 // Read Cache Level ID Register (CLIDR)
+    TST r0, #0x3 // Harvard Cache?
+    MOV r0, #0 // SBZ
+    MCRNE p15, 0, r0, c7, c5, 0 // ICIALLU - Invalidate instruction cache and flush branch target cache
+
+    // Invalidate Data/Unified Caches: walk every cache level reported by
+    // CLIDR and invalidate each level by set/way (standard ARMv7-A sequence).
+
+    MRC p15, 1, r0, c0, c0, 1 // Read CLIDR
+    ANDS r3, r0, #0x07000000 // Extract coherency level
+    MOV r3, r3, LSR #23 // Total cache levels << 1
+    BEQ Finished // If 0, no need to clean
+
+    MOV r10, #0 // R10 holds current cache level << 1
+Loop1:
+    ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
+    MOV r1, r0, LSR r2 // Bottom 3 bits are the Cache-type for this level
+    AND r1, r1, #7 // Isolate those lower 3 bits
+    CMP r1, #2
+    BLT Skip // No cache or only instruction cache at this level
+
+    MCR p15, 2, r10, c0, c0, 0 // Write the Cache Size selection register
+    ISB // ISB to sync the change to the CacheSizeID reg
+    MRC p15, 1, r1, c0, c0, 0 // Reads current Cache Size ID register
+    AND r2, r1, #7 // Extract the line length field
+    ADD r2, r2, #4 // Add 4 for the line length offset (log2 16 bytes)
+    LDR r4, =0x3FF
+    ANDS r4, r4, r1, LSR #3 // R4 is the max number on the way size (right aligned)
+    CLZ r5, r4 // R5 is the bit position of the way size increment
+    LDR r7, =0x7FFF
+    ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
+
+Loop2:
+    MOV r9, r4 // R9 working copy of the max way size (right aligned)
+
+Loop3:
+    ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
+    ORR r11, r11, r7, LSL r2 // Factor in the Set number
+    MCR p15, 0, r11, c7, c6, 2 // Invalidate by Set/Way
+    SUBS r9, r9, #1 // Decrement the Way number
+    BGE Loop3
+    SUBS r7, r7, #1 // Decrement the Set number
+    BGE Loop2
+Skip:
+    ADD r10, r10, #2 // Increment the cache number
+    CMP r3, r10
+    BGT Loop1
+
+Finished:
+
+
+//----------------------------------------------------------------
+// MMU Configuration
+// Set translation table base
+//----------------------------------------------------------------
+
+    // Two translation tables are supported, TTBR0 and TTBR1
+    // Configure translation table base (TTB) control register cp15,c2
+    // to a value of all zeros, indicates we are using TTB register 0.
+
+    MOV r0,#0x0
+    MCR p15, 0, r0, c2, c0, 2
+
+    // write the address of our page table base to TTB register 0
+    // (TTB storage is the 16KB TTB region from the scatter file)
+    LDR r0,=Image$$TTB$$ZI$$Base
+
+    MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
+                  // S=0 (translation table walk to non-shared memory)
+    ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
+
+    ORR r0,r0,r1
+
+    MCR p15, 0, r0, c2, c0, 0
+
+
+//----------------------------------------------------------------
+// PAGE TABLE generation
+
+// Generate the page tables
+// Build a flat translation table for the whole address space.
+// ie: Create 4096 1MB sections from 0x000xxxxx to 0xFFFxxxxx
+
+
+// 31 20 19 18 17 16 15 14 12 11 10 9 8 5 4 3 2 1 0
+// |section base address| 0 0 |nG| S |AP2| TEX | AP | P | Domain | XN | C B | 1 0|
+//
+// Bits[31:20] - Top 12 bits of VA is pointer into table
+// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
+// S[16]=0 - Indicates normal memory is shared when set.
+// AP2[15]=0
+// AP[11:10]=11 - Configure for full read/write access in all modes
+// TEX[14:12]=000
+// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
+// (except for the code segment descriptor, see below)
+// IMPP[9]=0 - Ignored
+// Domain[8:5]=1111 - Set all pages to use domain 15
+// XN[4]=1 - Execute never on Strongly-ordered memory
+// Bits[1:0]=10 - Indicate entry is a 1MB section
+//----------------------------------------------------------------
+    LDR r0,=Image$$TTB$$ZI$$Base
+    LDR r1,=0xfff // loop counter
+    LDR r2,=0b00000000000000000000110111100010
+
+    // r0 contains the address of the translation table base
+    // r1 is loop counter
+    // r2 is level1 descriptor (bits 19:0)
+
+    // use loop counter to create 4096 individual table entries.
+    // this writes from address 'Image$$TTB$$ZI$$Base' +
+    // offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
+
+init_ttb_1:
+    ORR r3, r2, r1, LSL#20 // R3 now contains full level1 descriptor to write
+    ORR r3, r3, #0b0000000010000 // Set XN bit
+    STR r3, [r0, r1, LSL#2] // Str table entry at TTB base + loopcount*4
+    SUBS r1, r1, #1 // Decrement loop counter
+    BPL init_ttb_1
+
+    // In this example, the 1MB section based at '__code_start' is setup specially as cacheable (write back mode).
+    // TEX[14:12]=001 and CB[3:2]= 11, Outer and inner write back, write allocate normal memory.
+    LDR r1,=Image$$VECTORS$$Base // Base physical address of code segment
+    LSR r1, #20 // Shift right to align to 1MB boundaries
+    ORR r3, r2, r1, LSL#20 // Setup the initial level1 descriptor again
+    ORR r3, r3, #0b0000000001100 // Set CB bits
+    ORR r3, r3, #0b1000000000000 // Set TEX bit 12
+    STR r3, [r0, r1, LSL#2] // str table entry
+
+//----------------------------------------------------------------
+// Setup domain control register - Enable all domains to client mode
+//----------------------------------------------------------------
+
+    MRC p15, 0, r0, c3, c0, 0 // Read Domain Access Control Register
+                              // NOTE(review): this read is immediately
+                              // overwritten by the LDR below -- dead code?
+    LDR r0, =0x55555555 // Initialize every domain entry to b01 (client)
+    MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
+
+#if defined(__ARM_NEON) || defined(__ARM_FP)
+//----------------------------------------------------------------
+// Enable access to NEON/VFP by enabling access to Coprocessors 10 and 11.
+// Enables Full Access i.e. in both privileged and non privileged modes
+//----------------------------------------------------------------
+
+    MRC p15, 0, r0, c1, c0, 2 // Read Coprocessor Access Control Register (CPACR)
+    ORR r0, r0, #(0xF << 20) // Enable access to CP 10 & 11
+    MCR p15, 0, r0, c1, c0, 2 // Write Coprocessor Access Control Register (CPACR)
+    ISB
+
+//----------------------------------------------------------------
+// Switch on the VFP and NEON hardware
+//----------------------------------------------------------------
+
+    MOV r0, #0x40000000
+    VMSR FPEXC, r0 // Write FPEXC register, EN bit set
+#endif
+
+
+//----------------------------------------------------------------
+// Enable MMU and branch to __main
+// Leaving the caches disabled until after scatter loading.
+//----------------------------------------------------------------
+
+    LDR r12,=__main
+
+    MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+    BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+    BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+    BIC r0, r0, #0x2 // Clear A bit 1 to disable strict alignment fault checking
+    ORR r0, r0, #0x1 // Set M bit 0 to enable MMU before scatter loading
+    MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+    ISB
+
+// Now the MMU is enabled, virtual to physical address translations will occur. This will affect the next
+// instruction fetch.
+//
+// The two instructions currently in the pipeline will have been fetched before the MMU was enabled.
+// The branch to __main is safe because the Virtual Address (VA) is the same as the Physical Address (PA)
+// (flat mapping) of this code that enables the MMU and performs the branch
+
+    BX r12 // Branch to __main C library entry point
+
+
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+// This code must be run from a privileged mode
+// (intended to be called from C after scatter-loading -- see the
+// comment in Reset_Handler above)
+//----------------------------------------------------------------
+
+    .section ENABLECACHES,"ax"
+    .align 3
+
+    .global enable_caches
+    .type enable_caches, "function"
+    .cfi_startproc
+enable_caches:
+
+//----------------------------------------------------------------
+// Enable caches and branch prediction
+//----------------------------------------------------------------
+
+    MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+    ORR r0, r0, #(0x1 << 12) // Set I bit 12 to enable I Cache
+    ORR r0, r0, #(0x1 << 2) // Set C bit 2 to enable D Cache
+    ORR r0, r0, #(0x1 << 11) // Set Z bit 11 to enable branch prediction
+    MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+    ISB
+
+    MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+    ORR r0, #2 // L2EN bit, enable L2 cache
+    ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
+    MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
+    ISB
+
+    BX lr
+    .cfi_endproc
+
+// Turn the L1 caches back off.
+// NOTE(review): the D-cache is disabled without a prior clean/invalidate
+// by set/way; any dirty lines would be stranded -- confirm callers
+// clean the cache first if data coherency matters.
+    .global disable_caches
+    .type disable_caches, "function"
+disable_caches:
+
+    MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+    BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+    BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+    MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+    ISB
+
+    BX lr
+
diff --git a/ports/cortex_a5/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a5/ac6/example_build/sample_threadx/tx_initialize_low_level.S
new file mode 100644
index 00000000..715958f0
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -0,0 +1,299 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
+ .global _tx_thread_system_stack_ptr
+ .global _tx_initialize_unused_memory
+ .global _tx_thread_context_save
+ .global _tx_thread_context_restore
+ .global _tx_timer_interrupt
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_initialize_low_level
+ .type $_tx_initialize_low_level,function
+$_tx_initialize_low_level:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_initialize_low_level
+ .type _tx_initialize_low_level,function
+_tx_initialize_low_level:
+
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+#endif
+
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+/* Define shells for each of the interrupt vectors. */
+
+ .global __tx_undefined
+__tx_undefined:
+ B __tx_undefined // Undefined handler
+
+ .global __tx_swi_interrupt
+__tx_swi_interrupt:
+ B __tx_swi_interrupt // Software interrupt handler
+
+ .global __tx_prefetch_handler
+__tx_prefetch_handler:
+ B __tx_prefetch_handler // Prefetch exception handler
+
+ .global __tx_abort_handler
+__tx_abort_handler:
+ B __tx_abort_handler // Abort exception handler
+
+ .global __tx_reserved_handler
+__tx_reserved_handler:
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
+ .type __tx_irq_processing_return,function
+ .global __tx_irq_handler
+__tx_irq_handler:
+
+ /* Jump to context save to save system context. */
+ B _tx_thread_context_save
+__tx_irq_processing_return:
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_start
+#endif
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled.  */
+#ifdef TX_ENABLE_IRQ_NESTING
+ BL _tx_thread_irq_nesting_end
+#endif
+
+ /* Jump to context restore to restore system context. */
+ B _tx_thread_context_restore
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled.  */
+
+ /* Jump to context restore to restore system context. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ .global __tx_fiq_handler
+ .global __tx_fiq_processing_return
+__tx_fiq_handler:
+
+ /* Jump to fiq context save to save system context. */
+ B _tx_thread_fiq_context_save
+__tx_fiq_processing_return:
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_start
+#endif
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
+#ifdef TX_ENABLE_FIQ_NESTING
+ BL _tx_thread_fiq_nesting_end
+#endif
+
+ /* Jump to fiq context restore to restore system context. */
+ B _tx_thread_fiq_context_restore
+
+
+#else
+ .global __tx_fiq_handler
+__tx_fiq_handler:
+ B __tx_fiq_handler // FIQ interrupt handler
+#endif
+
+
+BUILD_OPTIONS:
+ .word _tx_build_options // Reference to bring in
+VERSION_ID:
+ .word _tx_version_id // Reference to bring in
+
+
diff --git a/ports/cortex_a5/ac6/example_build/tx/.cproject b/ports/cortex_a5/ac6/example_build/tx/.cproject
new file mode 100644
index 00000000..730528ae
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/tx/.cproject
@@ -0,0 +1,146 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a5/ac6/example_build/tx/.project b/ports/cortex_a5/ac6/example_build/tx/.project
new file mode 100644
index 00000000..863ca5cb
--- /dev/null
+++ b/ports/cortex_a5/ac6/example_build/tx/.project
@@ -0,0 +1,48 @@
+
+
+ tx
+
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.genmakebuilder
+ clean,full,incremental,
+
+
+
+
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder
+ full,incremental,
+
+
+
+
+
+ org.eclipse.cdt.core.cnature
+ org.eclipse.cdt.managedbuilder.core.managedBuildNature
+ org.eclipse.cdt.managedbuilder.core.ScannerConfigNature
+
+
+
+ inc_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/inc
+
+
+ inc_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/inc
+
+
+ src_generic
+ 2
+ $%7BPARENT-5-PROJECT_LOC%7D/common/src
+
+
+ src_port
+ 2
+ $%7BPARENT-2-PROJECT_LOC%7D/src
+
+
+
diff --git a/ports/cortex_a5/ac6/inc/tx_port.h b/ports/cortex_a5/ac6/inc/tx_port.h
new file mode 100644
index 00000000..19463de1
--- /dev/null
+++ b/ports/cortex_a5/ac6/inc/tx_port.h
@@ -0,0 +1,328 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Port Specific */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
+/* */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* macro definition, */
+/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+
+#ifndef TX_PORT_H
+#define TX_PORT_H
+
+
+/* Determine if the optional ThreadX user define file should be used. */
+
+#ifdef TX_INCLUDE_USER_DEFINE_FILE
+
+
+/* Yes, include the user defines in tx_user.h. The defines in this file may
+ alternately be defined on the command line. */
+
+#include "tx_user.h"
+#endif
+
+
+/* Define compiler library include files. */
+
+#include <stdlib.h>
+#include <string.h>
+
+
+/* Define ThreadX basic types for this port. */
+
+#define VOID void
+typedef char CHAR;
+typedef unsigned char UCHAR;
+typedef int INT;
+typedef unsigned int UINT;
+typedef long LONG;
+typedef unsigned long ULONG;
+typedef short SHORT;
+typedef unsigned short USHORT;
+
+
+/* Define the priority levels for ThreadX. Legal values range
+ from 32 to 1024 and MUST be evenly divisible by 32. */
+
+#ifndef TX_MAX_PRIORITIES
+#define TX_MAX_PRIORITIES 32
+#endif
+
+
+/* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
+ thread creation is less than this value, the thread create call will return an error. */
+
+#ifndef TX_MINIMUM_STACK
+#define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
+#endif
+
+
+/* Define the system timer thread's default stack size and priority. These are only applicable
+ if TX_TIMER_PROCESS_IN_ISR is not defined. */
+
+#ifndef TX_TIMER_THREAD_STACK_SIZE
+#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
+#endif
+
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#endif
+
+
+/* Define various constants for the ThreadX ARM port. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
+#else
+#define TX_INT_DISABLE 0x80 /* Disable IRQ interrupts */
+#endif
+#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
+
+
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ source constants would be:
+
+#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_MASK 0x0000FFFFUL
+
+*/
+
+#ifndef TX_TRACE_TIME_SOURCE
+#define TX_TRACE_TIME_SOURCE ++_tx_trace_simulated_time
+#endif
+#ifndef TX_TRACE_TIME_MASK
+#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
+#endif
+
+
+/* Define the port specific options for the _tx_build_options variable. This variable indicates
+ how the ThreadX library was built. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_FIQ_ENABLED 1
+#else
+#define TX_FIQ_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_IRQ_NESTING
+#define TX_IRQ_NESTING_ENABLED 2
+#else
+#define TX_IRQ_NESTING_ENABLED 0
+#endif
+
+#ifdef TX_ENABLE_FIQ_NESTING
+#define TX_FIQ_NESTING_ENABLED 4
+#else
+#define TX_FIQ_NESTING_ENABLED 0
+#endif
+
+#define TX_PORT_SPECIFIC_BUILD_OPTIONS TX_FIQ_ENABLED | TX_IRQ_NESTING_ENABLED | TX_FIQ_NESTING_ENABLED
+
+
+/* Define the in-line initialization constant so that modules with in-line
+ initialization capabilities can prevent their initialization from being
+ a function call. */
+
+#define TX_INLINE_INITIALIZATION
+
+
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+ disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
+ checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
+ define is negated, thereby forcing the stack fill which is necessary for the stack checking
+ logic. */
+
+#ifdef TX_ENABLE_STACK_CHECKING
+#undef TX_DISABLE_STACK_FILLING
+#endif
+
+
+/* Define the TX_THREAD control block extensions for this port. The main reason
+ for the multiple macros is so that backward compatibility can be maintained with
+ existing ThreadX kernel awareness modules. */
+
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
+#define TX_THREAD_EXTENSION_3
+
+
+/* Define the port extensions of the remaining ThreadX objects. */
+
+#define TX_BLOCK_POOL_EXTENSION
+#define TX_BYTE_POOL_EXTENSION
+#define TX_EVENT_FLAGS_GROUP_EXTENSION
+#define TX_MUTEX_EXTENSION
+#define TX_QUEUE_EXTENSION
+#define TX_SEMAPHORE_EXTENSION
+#define TX_TIMER_EXTENSION
+
+
+/* Define the user extension field of the thread control block. Nothing
+ additional is needed for this port so it is defined as white space. */
+
+#ifndef TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
+#endif
+
+
+/* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
+ tx_thread_shell_entry, and tx_thread_terminate. */
+
+
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
+#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
+
+
+/* Define the ThreadX object creation extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
+#define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_CREATE_EXTENSION(timer_ptr)
+
+
+/* Define the ThreadX object deletion extensions for the remaining objects. */
+
+#define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
+#define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
+#define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
+#define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
+#define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
+#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
+
+
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
+ lowest bit set. */
+
+#if __TARGET_ARCH_ARM > 4
+
+#ifndef __thumb__
+
+#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
+ asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
+ b = 31 - b;
+#endif
+#endif
+
+
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
+ present prior to the disable macro. In most cases, the save area macro
+ is used to define a local function save area for the disable and restore
+ macros. */
+
+#ifdef __thumb__
+
+unsigned int _tx_thread_interrupt_disable(void);
+unsigned int _tx_thread_interrupt_restore(UINT old_posture);
+
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
+
+#define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
+#define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
+
+#else
+
+#define TX_INTERRUPT_SAVE_AREA UINT interrupt_save, tx_temp;
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID if ": "=r" (interrupt_save) );
+#else
+#define TX_DISABLE asm volatile (" MRS %0,CPSR; CPSID i ": "=r" (interrupt_save) );
+#endif
+
+#define TX_RESTORE asm volatile (" MSR CPSR_c,%0 "::"r" (interrupt_save) );
+
+#endif
+
+
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
+ thread. */
+
+void tx_thread_vfp_enable(void);
+void tx_thread_vfp_disable(void);
+
+
+/* Define the interrupt lockout macros for each ThreadX object. */
+
+#define TX_BLOCK_POOL_DISABLE TX_DISABLE
+#define TX_BYTE_POOL_DISABLE TX_DISABLE
+#define TX_EVENT_FLAGS_GROUP_DISABLE TX_DISABLE
+#define TX_MUTEX_DISABLE TX_DISABLE
+#define TX_QUEUE_DISABLE TX_DISABLE
+#define TX_SEMAPHORE_DISABLE TX_DISABLE
+
+
+/* Define the version ID of ThreadX. This may be utilized by the application. */
+
+#ifdef TX_THREAD_INIT
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
+#else
+extern CHAR _tx_version_id[];
+#endif
+
+
+#endif
+
diff --git a/ports/cortex_a5/ac6/src/tx_thread_context_restore.S b/ports/cortex_a5/ac6/src/tx_thread_context_restore.S
new file mode 100644
index 00000000..fae7e72d
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_context_restore.S
@@ -0,0 +1,222 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
+#else
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
+#endif
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_restore
+ .type _tx_thread_context_restore,function
+_tx_thread_context_restore:
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
+_tx_skip_irq_vfp_save:
+
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a5/ac6/src/tx_thread_context_save.S b/ports/cortex_a5/ac6/src/tx_thread_context_save.S
new file mode 100644
index 00000000..7ac48c2e
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_context_save.S
@@ -0,0 +1,172 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_context_save
+ .type _tx_thread_context_save,function
+_tx_thread_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_irq_processing_return // Continue IRQ processing
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a5/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a5/ac6/src/tx_thread_fiq_context_restore.S
new file mode 100644
index 00000000..006be973
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_fiq_context_restore.S
@@ -0,0 +1,223 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_thread_system_stack_ptr
+ .global _tx_thread_execute_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+ .global _tx_thread_preempt_disable
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_restore
+ .type _tx_thread_fiq_context_restore,function
+_tx_thread_fiq_context_restore:
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+ // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
+
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_fiq_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR and point of interrupt
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP FIQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+_tx_skip_fiq_vfp_save:
+#endif
+
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
+
diff --git a/ports/cortex_a5/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a5/ac6/src/tx_thread_fiq_context_save.S
new file mode 100644
index 00000000..7db6a4c2
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_fiq_context_save.S
@@ -0,0 +1,178 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global __tx_fiq_processing_return
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_context_save
+ .type _tx_thread_fiq_context_save,function
+_tx_thread_fiq_context_save:
+
+ /* Upon entry to this routine, it is assumed that interrupts are locked
+ out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
+__tx_thread_fiq_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers. Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ B __tx_fiq_processing_return // Continue FIQ processing
+
+__tx_thread_fiq_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_end.S
new file mode 100644
index 00000000..b34d881e
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_end.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_end
+ .type _tx_thread_fiq_nesting_end,function
+_tx_thread_fiq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
+
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_start.S
new file mode 100644
index 00000000..c9cd5a06
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_fiq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_fiq_nesting_start
+ .type _tx_thread_fiq_nesting_start,function
+_tx_thread_fiq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts (still in system mode)
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a5/ac6/src/tx_thread_interrupt_control.S
new file mode 100644
index 00000000..63b1609a
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_interrupt_control.S
@@ -0,0 +1,104 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+INT_MASK = 0x03F
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_control
+$_tx_thread_interrupt_control:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_control
+ .type _tx_thread_interrupt_control,function
+_tx_thread_interrupt_control:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build mode/Thumb bit mask
+ AND r1, r3, r2 // Clear interrupt lockout bits, keep mode/Thumb bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt lockout bits
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a5/ac6/src/tx_thread_interrupt_disable.S
new file mode 100644
index 00000000..13258808
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_interrupt_disable.S
@@ -0,0 +1,101 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_disable
+$_tx_thread_interrupt_disable:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_disable
+ .type _tx_thread_interrupt_disable,function
+_tx_thread_interrupt_disable:
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR (old posture returned in r0)
+
+ /* Mask interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ
+#else
+ CPSID i // Disable IRQ
+#endif
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a5/ac6/src/tx_thread_interrupt_restore.S
new file mode 100644
index 00000000..2d582511
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_interrupt_restore.S
@@ -0,0 +1,93 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
+ .text
+ .align 2
+ .global $_tx_thread_interrupt_restore
+$_tx_thread_interrupt_restore:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_interrupt_restore
+ .type _tx_thread_interrupt_restore,function
+_tx_thread_interrupt_restore:
+
+ /* Apply the previously saved interrupt posture. */
+
+ MSR CPSR_c, r0 // Restore CPSR control field from old posture
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_end.S
new file mode 100644
index 00000000..ec7e63c6
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_end.S
@@ -0,0 +1,103 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
+#else
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
+#endif
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_end
+ .type _tx_thread_irq_nesting_end,function
+_tx_thread_irq_nesting_end:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Recover system mode lr (r1 is a throw-away
+ // kept for 8-byte stack alignment)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_start.S
new file mode 100644
index 00000000..c69976ed
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_irq_nesting_start.S
@@ -0,0 +1,96 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_irq_nesting_start
+ .type _tx_thread_irq_nesting_start,function
+_tx_thread_irq_nesting_start:
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
+#ifdef __THUMB_INTERWORK
+ BX r3 // Return to caller
+#else
+ MOV pc, r3 // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_schedule.S b/ports/cortex_a5/ac6/src/tx_thread_schedule.S
new file mode 100644
index 00000000..8330e9df
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_schedule.S
@@ -0,0 +1,230 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_execute_ptr
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_schedule
+ .type $_tx_thread_schedule,function
+$_tx_thread_schedule:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_schedule
+ .type _tx_thread_schedule,function
+_tx_thread_schedule:
+
+ /* Enable interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSIE if // Enable IRQ and FIQ interrupts
+#else
+ CPSIE i // Enable IRQ interrupts
+#endif
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
+__tx_thread_schedule_loop:
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
+#endif
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
+ BEQ _tx_solicited_return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_interrupt_vfp_restore:
+#endif
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
+
+_tx_solicited_return:
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
+_tx_skip_solicited_vfp_restore:
+#endif
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+
+ .global tx_thread_vfp_enable
+ .type tx_thread_vfp_enable,function
+tx_thread_vfp_enable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+    CPSID   if                              // Disable IRQ and FIQ interrupts
+#else
+    CPSID   i                               // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_enable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+ .global tx_thread_vfp_disable
+ .type tx_thread_vfp_disable,function
+tx_thread_vfp_disable:
+ MRS r2, CPSR // Pickup the CPSR
+#ifdef TX_ENABLE_FIQ_SUPPORT
+    CPSID   if                              // Disable IRQ and FIQ interrupts
+#else
+    CPSID   i                               // Disable IRQ interrupts
+#endif
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+__tx_no_thread_to_disable:
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
+
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_stack_build.S b/ports/cortex_a5/ac6/src/tx_thread_stack_build.S
new file mode 100644
index 00000000..f413e673
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_stack_build.S
@@ -0,0 +1,164 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+ .arm
+
+SVC_MODE = 0x13 // SVC mode
+#ifdef TX_ENABLE_FIQ_SUPPORT
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
+#else
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
+#endif
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_thread_stack_build
+ .type $_tx_thread_stack_build,function
+$_tx_thread_stack_build:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_stack_build
+ .type _tx_thread_stack_build,function
+_tx_thread_stack_build:
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+            pc (r15)    Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/ac6/src/tx_thread_system_return.S b/ports/cortex_a5/ac6/src/tx_thread_system_return.S
new file mode 100644
index 00000000..cb7d62ce
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_system_return.S
@@ -0,0 +1,162 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+ .global _tx_thread_current_ptr
+ .global _tx_timer_time_slice
+ .global _tx_thread_schedule
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .global $_tx_thread_system_return
+ .type $_tx_thread_system_return,function
+$_tx_thread_system_return:
+ .thumb
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/*    are going to get clobbered by a function call anyway.               */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_system_return
+ .type _tx_thread_system_return,function
+_tx_thread_system_return:
+
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
+#ifdef TX_ENABLE_VFP_SUPPORT
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
+_tx_skip_solicited_vfp_save:
+#endif
+
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a5/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a5/ac6/src/tx_thread_vectored_context_save.S
new file mode 100644
index 00000000..d846223f
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_thread_vectored_context_save.S
@@ -0,0 +1,165 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .global _tx_thread_system_state
+ .global _tx_thread_current_ptr
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
+ .arm
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_thread_vectored_context_save
+ .type _tx_thread_vectored_context_save,function
+_tx_thread_vectored_context_save:
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#endif
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_not_nested_save:
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ MOV pc, lr // Return to caller
+
+__tx_thread_idle_system_save:
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a5/ac6/src/tx_timer_interrupt.S b/ports/cortex_a5/ac6/src/tx_timer_interrupt.S
new file mode 100644
index 00000000..7337ed0c
--- /dev/null
+++ b/ports/cortex_a5/ac6/src/tx_timer_interrupt.S
@@ -0,0 +1,231 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .arm
+
+
+/* Define Assembly language external references... */
+
+ .global _tx_timer_time_slice
+ .global _tx_timer_system_clock
+ .global _tx_timer_current_ptr
+ .global _tx_timer_list_start
+ .global _tx_timer_list_end
+ .global _tx_timer_expired_time_slice
+ .global _tx_timer_expired
+ .global _tx_thread_time_slice
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode.  */
+
+ .text
+ .align 2
+ .thumb
+ .global $_tx_timer_interrupt
+ .type $_tx_timer_interrupt,function
+$_tx_timer_interrupt:
+ BX pc // Switch to 32-bit mode
+ NOP //
+ .arm
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
+ .text
+ .align 2
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
+ .global _tx_timer_interrupt
+ .type _tx_timer_interrupt,function
+_tx_timer_interrupt:
+
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                        // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a5/gnu/example_build/build_threadx_sample.bat
index 123a84c8..54aa192c 100644
--- a/ports/cortex_a5/gnu/example_build/build_threadx_sample.bat
+++ b/ports/cortex_a5/gnu/example_build/build_threadx_sample.bat
@@ -2,5 +2,5 @@ arm-none-eabi-gcc -c -g -mcpu=cortex-a5 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a5 -I../../../../common/inc -I../inc sample_threadx.c
-arm-none-eabi-ld -A cortex-a5 -T sample_threadx.ld reset.o crt0.o tx_initialize_low_level.o sample_threadx.o tx.a libc.a libgcc.a -o sample_threadx.out -M > sample_threadx.map
+arm-none-eabi-gcc -g -mcpu=cortex-a5 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
diff --git a/ports/cortex_a5/gnu/example_build/crt0.S b/ports/cortex_a5/gnu/example_build/crt0.S
index aa0f3239..56b6c958 100644
--- a/ports/cortex_a5/gnu/example_build/crt0.S
+++ b/ports/cortex_a5/gnu/example_build/crt0.S
@@ -26,13 +26,13 @@ _mainCRTStartup:
mov a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
-
- ldr a1, .LC1 /* First arg: start of memory block */
- ldr a3, .LC2
- sub a3, a3, a1 /* Third arg: length of block */
-
-
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
bl memset
mov r0, #0 /* no arguments */
mov r1, #0 /* no argv either */
@@ -48,15 +48,15 @@ _mainCRTStartup:
/* bl init */
mov r0, r4
mov r1, r5
-#endif
+#endif
bl main
bl exit /* Should not return. */
-
- /* For Thumb, constants must be after the code since only
+
+ /* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
-
+
.align 0
.LC0:
.LC1:
diff --git a/ports/cortex_a5/gnu/example_build/reset.S b/ports/cortex_a5/gnu/example_build/reset.S
index 856e31eb..597e9d9a 100644
--- a/ports/cortex_a5/gnu/example_build/reset.S
+++ b/ports/cortex_a5/gnu/example_build/reset.S
@@ -1,35 +1,24 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
@@ -41,36 +30,35 @@
.global __tx_reserved_handler
.global __tx_irq_handler
.global __tx_fiq_handler
-@
-@
-@/* Define the vector area. This should be located or copied to 0. */
-@
+
+/* Define the vector area. This should be located or copied to 0. */
+
.text
.global __vectors
__vectors:
- LDR pc, STARTUP @ Reset goes to startup function
- LDR pc, UNDEFINED @ Undefined handler
- LDR pc, SWI @ Software interrupt handler
- LDR pc, PREFETCH @ Prefetch exception handler
- LDR pc, ABORT @ Abort exception handler
- LDR pc, RESERVED @ Reserved exception handler
- LDR pc, IRQ @ IRQ interrupt handler
- LDR pc, FIQ @ FIQ interrupt handler
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
STARTUP:
- .word _start @ Reset goes to C startup function
+ .word _start // Reset goes to C startup function
UNDEFINED:
- .word __tx_undefined @ Undefined handler
+ .word __tx_undefined // Undefined handler
SWI:
- .word __tx_swi_interrupt @ Software interrupt handler
+ .word __tx_swi_interrupt // Software interrupt handler
PREFETCH:
- .word __tx_prefetch_handler @ Prefetch exception handler
-ABORT:
- .word __tx_abort_handler @ Abort exception handler
-RESERVED:
- .word __tx_reserved_handler @ Reserved exception handler
-IRQ:
- .word __tx_irq_handler @ IRQ interrupt handler
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
FIQ:
- .word __tx_fiq_handler @ FIQ interrupt handler
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a5/gnu/example_build/sample_threadx.c b/ports/cortex_a5/gnu/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a5/gnu/example_build/sample_threadx.c
+++ b/ports/cortex_a5/gnu/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a5/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a5/gnu/example_build/tx_initialize_low_level.S
index 1e7b0abb..7de5d3ce 100644
--- a/ports/cortex_a5/gnu/example_build/tx_initialize_low_level.S
+++ b/ports/cortex_a5/gnu/example_build/tx_initialize_low_level.S
@@ -1,47 +1,35 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
@@ -51,297 +39,267 @@ SYS_STACK_SIZE = 1024 @ System stack size
.global _sp
.global _stack_bottom
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =_sp @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =_stack_bottom @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =_end @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_handler
- .global __tx_irq_processing_return
+ .global __tx_irq_processing_return
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled. */
+
+
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a5/gnu/inc/tx_port.h b/ports/cortex_a5/gnu/inc/tx_port.h
index b810f807..19463de1 100644
--- a/ports/cortex_a5/gnu/inc/tx_port.h
+++ b/ports/cortex_a5/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A5/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A5. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A5/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a5/gnu/src/tx_thread_context_restore.S b/ports/cortex_a5/gnu/src/tx_thread_context_restore.S
index 21887189..fae7e72d 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_context_restore.S
@@ -1,260 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
- .global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+    /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a5/gnu/src/tx_thread_context_save.S b/ports/cortex_a5/gnu/src/tx_thread_context_save.S
index 9716f531..7ac48c2e 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_context_save.S
@@ -1,206 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
- .global _tx_irq_processing_return
- .global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a5/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a5/gnu/src/tx_thread_fiq_context_restore.S
index fed1fbda..006be973 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -46,218 +35,189 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+ // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a5/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a5/gnu/src/tx_thread_fiq_context_save.S
index a7977de0..7db6a4c2 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_fiq_context_save.S
@@ -1,207 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
.global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_end.S
index 5a21397a..b34d881e 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_start.S
index 6afdc528..c9cd5a06 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+    MSR     CPSR_c, r0                      // Apply CPSR, enabling FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a5/gnu/src/tx_thread_interrupt_control.S
index a56054c4..63b1609a 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a5/gnu/src/tx_thread_interrupt_disable.S
index 76693663..13258808 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a5/gnu/src/tx_thread_interrupt_restore.S
index 043c6fd9..2d582511 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_end.S
index 2cd4f44a..ec7e63c6 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_start.S
index ff5ef319..c69976ed 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_schedule.S b/ports/cortex_a5/gnu/src/tx_thread_schedule.S
index 5336abb5..8330e9df 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_schedule.S
@@ -1,258 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
- .global _tx_execution_thread_enter
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_stack_build.S b/ports/cortex_a5/gnu/src/tx_thread_stack_build.S
index 8f09c7ae..f413e673 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A5 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+                    pc    (r15)                  Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a5/gnu/src/tx_thread_system_return.S b/ports/cortex_a5/gnu/src/tx_thread_system_return.S
index 2ee8d99c..cb7d62ce 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_system_return.S
@@ -1,183 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
- .global _tx_execution_thread_exit
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a5/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a5/gnu/src/tx_thread_vectored_context_save.S
index c82c742d..d846223f 100644
--- a/ports/cortex_a5/gnu/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a5/gnu/src/tx_thread_vectored_context_save.S
@@ -1,193 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_execution_isr_enter
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A5/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a5/gnu/src/tx_timer_interrupt.S b/ports/cortex_a5/gnu/src/tx_timer_interrupt.S
index 2f922527..7337ed0c 100644
--- a/ports/cortex_a5/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a5/gnu/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A5/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                            // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx.c b/ports/cortex_a7/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+        below shows, these functions compete for ownership of semaphore_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+        below shows, these functions compete for ownership of mutex_0.  */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..0bc5b11b
--- /dev/null
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.scat
index 82477876..d23881cd 100644
--- a/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.scat
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -5,12 +5,10 @@
; and your compliance with all applicable terms and conditions of such licence agreement.
;*******************************************************
-; Scatter-file for Cortex-A7 bare-metal example on Versatile Express
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
-; Versatile Express with Cortex-A7 has 1GB SDRAM at 0x60000000 to 0x9FFFFFFF, which this scatter-file uses.
-
SDRAM 0x80000000 0x20000000
{
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a7/ac6/example_build/sample_threadx/startup.S
index 35be43cf..670fadb9 100644
--- a/ports/cortex_a7/ac6/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx/startup.S
@@ -1,13 +1,12 @@
//----------------------------------------------------------------
-// Cortex-A7 Embedded example - Startup Code
+// ARMv7-A Embedded example - Startup Code
//
// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//----------------------------------------------------------------
-
// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
#define Mode_USR 0x10
@@ -26,7 +25,7 @@
.align 3
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
-
+
//----------------------------------------------------------------
// Entry point for the Reset handler
//----------------------------------------------------------------
@@ -103,6 +102,7 @@ Reset_Handler:
BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
@@ -134,6 +134,26 @@ Reset_Handler:
LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
+//----------------------------------------------------------------
+// Disable loop-buffer to fix errata on A15 r0p0
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+ MOV r1, r0, lsr #4 // Extract Primary Part Number
+ LDR r2, =0xFFF
+ AND r1, r1, r2
+ LDR r2, =0xC0F
+ CMP r1, r2 // Is this an A15?
+ BNE notA15r0p0 // Jump if not A15
+ AND r5, r0, #0x00f00000 // Variant
+ AND r6, r0, #0x0000000f // Revision
+ ORRS r6, r6, r5 // Combine variant and revision
+ BNE notA15r0p0 // Jump if not r0p0
+ MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+ ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+ MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+ ISB
+notA15r0p0:
+
//----------------------------------------------------------------
// Set Vector Base Address Register (VBAR) to point to this application's vector table
//----------------------------------------------------------------
@@ -142,7 +162,7 @@ Reset_Handler:
MCR p15, 0, r0, c12, c0, 0
//----------------------------------------------------------------
-// Cache Invalidation code for Cortex-A7
+// Cache Invalidation code for ARMv7-A
// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
//----------------------------------------------------------------
@@ -213,16 +233,18 @@ Finished:
// write the address of our page table base to TTB register 0
LDR r0,=Image$$TTB$$ZI$$Base
+
MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
// S=0 (translation table walk to non-shared memory)
ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
ORR r0,r0,r1
+
MCR p15, 0, r0, c2, c0, 0
//----------------------------------------------------------------
-// PAGE TABLE generation
+// PAGE TABLE generation
// Generate the page tables
// Build a flat translation table for the whole address space.
@@ -235,7 +257,7 @@ Finished:
// Bits[31:20] - Top 12 bits of VA is pointer into table
// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
// S[16]=0 - Indicates normal memory is shared when set.
-// AP2[15]=0
+// AP2[15]=0
// AP[11:10]=11 - Configure for full read/write access in all modes
// TEX[14:12]=000
// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
@@ -254,7 +276,7 @@ Finished:
// r2 is level1 descriptor (bits 19:0)
// use loop counter to create 4096 individual table entries.
- // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
// offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
init_ttb_1:
@@ -328,7 +350,7 @@ init_ttb_1:
//----------------------------------------------------------------
-// Enable caches
+// Enable caches and branch prediction
// This code must be run from a privileged mode
//----------------------------------------------------------------
@@ -341,15 +363,35 @@ init_ttb_1:
enable_caches:
//----------------------------------------------------------------
-// Enable caches
+// Enable caches and branch prediction
//----------------------------------------------------------------
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
ORR r0, r0, #(0x1 << 12) // Set I bit 12 to enable I Cache
ORR r0, r0, #(0x1 << 2) // Set C bit 2 to enable D Cache
+ ORR r0, r0, #(0x1 << 11) // Set Z bit 11 to enable branch prediction
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
+ MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+ ORR r0, #2 // L2EN bit, enable L2 cache
+ ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
+ MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
+ ISB
+
BX lr
.cfi_endproc
+ .global disable_caches
+ .type disable_caches, "function"
+disable_caches:
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ BX lr
+
+
diff --git a/ports/cortex_a7/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a7/ac6/example_build/sample_threadx/tx_initialize_low_level.S
index 304e84f1..715958f0 100644
--- a/ports/cortex_a7/ac6/example_build/sample_threadx/tx_initialize_low_level.S
+++ b/ports/cortex_a7/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -1,344 +1,299 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =Image$$ARM_LIB_STACK$$Base @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =Image$$ZI_DATA$$ZI$$Limit @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_processing_return
.type __tx_irq_processing_return,function
.global __tx_irq_handler
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* Jump to context restore to restore system context. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a7/ac6/inc/tx_port.h b/ports/cortex_a7/ac6/inc/tx_port.h
index cc3cc987..19463de1 100644
--- a/ports/cortex_a7/ac6/inc/tx_port.h
+++ b/ports/cortex_a7/ac6/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A7/AC6 */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A7. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A7/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a7/ac6/src/tx_thread_context_restore.S b/ports/cortex_a7/ac6/src/tx_thread_context_restore.S
index 744ecb0a..fae7e72d 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_context_restore.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_context_restore.S
@@ -1,259 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a7/ac6/src/tx_thread_context_save.S b/ports/cortex_a7/ac6/src/tx_thread_context_save.S
index 8593e032..7ac48c2e 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_context_save.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_context_save.S
@@ -1,205 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_irq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a7/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a7/ac6/src/tx_thread_fiq_context_restore.S
index 087e9e26..006be973 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -45,218 +34,190 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a7/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a7/ac6/src/tx_thread_fiq_context_save.S
index 376fa33e..7db6a4c2 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_fiq_context_save.S
@@ -1,206 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_end.S
index 505a4878..b34d881e 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_start.S
index 43754d60..c9cd5a06 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a7/ac6/src/tx_thread_interrupt_control.S
index 94d09fcd..63b1609a 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a7/ac6/src/tx_thread_interrupt_disable.S
index bf82e314..13258808 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a7/ac6/src/tx_thread_interrupt_restore.S
index 2f402e34..2d582511 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_end.S
index 0081073b..ec7e63c6 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_start.S
index ef976b80..c69976ed 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_schedule.S b/ports/cortex_a7/ac6/src/tx_thread_schedule.S
index 085ee47a..8330e9df 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_schedule.S
@@ -1,257 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_stack_build.S b/ports/cortex_a7/ac6/src/tx_thread_stack_build.S
index 8598428e..f413e673 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_stack_build.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A7 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a7/ac6/src/tx_thread_system_return.S b/ports/cortex_a7/ac6/src/tx_thread_system_return.S
index 65b7ec45..cb7d62ce 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_system_return.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_system_return.S
@@ -1,182 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a7/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a7/ac6/src/tx_thread_vectored_context_save.S
index a7501c66..d846223f 100644
--- a/ports/cortex_a7/ac6/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a7/ac6/src/tx_thread_vectored_context_save.S
@@ -1,192 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A7/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a7/ac6/src/tx_timer_interrupt.S b/ports/cortex_a7/ac6/src/tx_timer_interrupt.S
index 907de9f8..7337ed0c 100644
--- a/ports/cortex_a7/ac6/src/tx_timer_interrupt.S
+++ b/ports/cortex_a7/ac6/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A7/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                                // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a7/gnu/example_build/build_threadx_sample.bat
index 561bab52..d0378cb8 100644
--- a/ports/cortex_a7/gnu/example_build/build_threadx_sample.bat
+++ b/ports/cortex_a7/gnu/example_build/build_threadx_sample.bat
@@ -2,5 +2,7 @@ arm-none-eabi-gcc -c -g -mcpu=cortex-a7 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a7 -I../../../../common/inc -I../inc sample_threadx.c
-arm-none-eabi-ld -A cortex-a7 -T sample_threadx.ld reset.o crt0.o tx_initialize_low_level.o sample_threadx.o tx.a libc.a libgcc.a -o sample_threadx.out -M > sample_threadx.map
+arm-none-eabi-gcc -g -mcpu=cortex-a7 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
+
+
diff --git a/ports/cortex_a7/gnu/example_build/crt0.S b/ports/cortex_a7/gnu/example_build/crt0.S
index aa0f3239..56b6c958 100644
--- a/ports/cortex_a7/gnu/example_build/crt0.S
+++ b/ports/cortex_a7/gnu/example_build/crt0.S
@@ -26,13 +26,13 @@ _mainCRTStartup:
mov a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
-
- ldr a1, .LC1 /* First arg: start of memory block */
- ldr a3, .LC2
- sub a3, a3, a1 /* Third arg: length of block */
-
-
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
bl memset
mov r0, #0 /* no arguments */
mov r1, #0 /* no argv either */
@@ -48,15 +48,15 @@ _mainCRTStartup:
/* bl init */
mov r0, r4
mov r1, r5
-#endif
+#endif
bl main
bl exit /* Should not return. */
-
- /* For Thumb, constants must be after the code since only
+
+ /* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
-
+
.align 0
.LC0:
.LC1:
diff --git a/ports/cortex_a7/gnu/example_build/reset.S b/ports/cortex_a7/gnu/example_build/reset.S
index 856e31eb..597e9d9a 100644
--- a/ports/cortex_a7/gnu/example_build/reset.S
+++ b/ports/cortex_a7/gnu/example_build/reset.S
@@ -1,35 +1,24 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
@@ -41,36 +30,35 @@
.global __tx_reserved_handler
.global __tx_irq_handler
.global __tx_fiq_handler
-@
-@
-@/* Define the vector area. This should be located or copied to 0. */
-@
+
+/* Define the vector area. This should be located or copied to 0. */
+
.text
.global __vectors
__vectors:
- LDR pc, STARTUP @ Reset goes to startup function
- LDR pc, UNDEFINED @ Undefined handler
- LDR pc, SWI @ Software interrupt handler
- LDR pc, PREFETCH @ Prefetch exception handler
- LDR pc, ABORT @ Abort exception handler
- LDR pc, RESERVED @ Reserved exception handler
- LDR pc, IRQ @ IRQ interrupt handler
- LDR pc, FIQ @ FIQ interrupt handler
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
STARTUP:
- .word _start @ Reset goes to C startup function
+ .word _start // Reset goes to C startup function
UNDEFINED:
- .word __tx_undefined @ Undefined handler
+ .word __tx_undefined // Undefined handler
SWI:
- .word __tx_swi_interrupt @ Software interrupt handler
+ .word __tx_swi_interrupt // Software interrupt handler
PREFETCH:
- .word __tx_prefetch_handler @ Prefetch exception handler
-ABORT:
- .word __tx_abort_handler @ Abort exception handler
-RESERVED:
- .word __tx_reserved_handler @ Reserved exception handler
-IRQ:
- .word __tx_irq_handler @ IRQ interrupt handler
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
FIQ:
- .word __tx_fiq_handler @ FIQ interrupt handler
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a7/gnu/example_build/sample_threadx.c b/ports/cortex_a7/gnu/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a7/gnu/example_build/sample_threadx.c
+++ b/ports/cortex_a7/gnu/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a7/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a7/gnu/example_build/tx_initialize_low_level.S
index 7d2ac9b7..7de5d3ce 100644
--- a/ports/cortex_a7/gnu/example_build/tx_initialize_low_level.S
+++ b/ports/cortex_a7/gnu/example_build/tx_initialize_low_level.S
@@ -1,47 +1,35 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
@@ -51,297 +39,267 @@ SYS_STACK_SIZE = 1024 @ System stack size
.global _sp
.global _stack_bottom
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =_sp @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =_stack_bottom @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =_end @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_handler
- .global __tx_irq_processing_return
+ .global __tx_irq_processing_return
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a7/gnu/inc/tx_port.h b/ports/cortex_a7/gnu/inc/tx_port.h
index b8c47dc6..19463de1 100644
--- a/ports/cortex_a7/gnu/inc/tx_port.h
+++ b/ports/cortex_a7/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A7/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A7. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A7/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a7/gnu/src/tx_thread_context_restore.S b/ports/cortex_a7/gnu/src/tx_thread_context_restore.S
index 5139d8ca..fae7e72d 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_context_restore.S
@@ -1,260 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
- .global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+    /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a7/gnu/src/tx_thread_context_save.S b/ports/cortex_a7/gnu/src/tx_thread_context_save.S
index ef8ed6b9..7ac48c2e 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_context_save.S
@@ -1,206 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
- .global _tx_irq_processing_return
- .global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a7/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a7/gnu/src/tx_thread_fiq_context_restore.S
index a0544f05..006be973 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -46,218 +35,189 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            //   got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            //   block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a7/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a7/gnu/src/tx_thread_fiq_context_save.S
index e2d12b36..7db6a4c2 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_fiq_context_save.S
@@ -1,207 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
.global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_end.S
index 2df342b3..b34d881e 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_start.S
index 1028d4cf..c9cd5a06 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a7/gnu/src/tx_thread_interrupt_control.S
index e3825784..63b1609a 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a7/gnu/src/tx_thread_interrupt_disable.S
index bb62310b..13258808 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a7/gnu/src/tx_thread_interrupt_restore.S
index f914fc31..2d582511 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_end.S
index 61414de8..ec7e63c6 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_start.S
index 4d606250..c69976ed 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Apply CPSR to enable IRQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_schedule.S b/ports/cortex_a7/gnu/src/tx_thread_schedule.S
index c7e9c5c6..8330e9df 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_schedule.S
@@ -1,258 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
- .global _tx_execution_thread_enter
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_stack_build.S b/ports/cortex_a7/gnu/src/tx_thread_stack_build.S
index b4809307..f413e673 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A7 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a7/gnu/src/tx_thread_system_return.S b/ports/cortex_a7/gnu/src/tx_thread_system_return.S
index 68ec35fa..cb7d62ce 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_system_return.S
@@ -1,183 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
- .global _tx_execution_thread_exit
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a7/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a7/gnu/src/tx_thread_vectored_context_save.S
index 7b39a4c4..d846223f 100644
--- a/ports/cortex_a7/gnu/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a7/gnu/src/tx_thread_vectored_context_save.S
@@ -1,193 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_execution_isr_enter
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A7/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a7/gnu/src/tx_timer_interrupt.S b/ports/cortex_a7/gnu/src/tx_timer_interrupt.S
index 1b8f37df..7337ed0c 100644
--- a/ports/cortex_a7/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a7/gnu/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from to 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A7/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx.c b/ports/cortex_a8/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 3 and thread 4. As the loop
+ below shows, these function compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This function is executed from thread 6 and thread 7. As the loop
+ below shows, these function compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a8/ac6/example_build/sample_threadx/.cproject
index 2a6227b6..e039b0b0 100644
--- a/ports/cortex_a8/ac6/example_build/sample_threadx/.cproject
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,37 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
+
-
+
-
+
-
+
-
+
@@ -117,7 +119,7 @@
-
+
@@ -137,7 +139,7 @@
-
+
@@ -151,8 +153,6 @@
-
-
@@ -168,5 +168,9 @@
+
+
+
+
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..ff3713c3
--- /dev/null
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.scat
index 013e9f8f..d23881cd 100644
--- a/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.scat
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -1,41 +1,44 @@
;*******************************************************
-; Copyright (c) 2010-2011 Arm Limited (or its affiliates). All rights reserved.
+; Copyright (c) 2011-2016 Arm Limited (or its affiliates). All rights reserved.
; Use, modification and redistribution of this file is subject to your possession of a
; valid End User License Agreement for the Arm Product of which these examples are part of
; and your compliance with all applicable terms and conditions of such licence agreement.
;*******************************************************
-; Scatter-file for bare-metal example on BeagleBoard
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
-; BeagleBoard has 256MB DDR SDRAM in its POP device at 0x80000000 to 0x8FFFFFFF, which this scatter-file uses.
-; Alternatively, OMAP3530 has 64KB internal SRAM, from 0x40200000 to 0x4020FFFF, which could be used for some regions instead.
-SDRAM 0x80000000 0x10000000
+SDRAM 0x80000000 0x20000000
{
- APP_CODE +0
+ VECTORS +0
{
* (VECTORS, +FIRST) ; Vector table and other (assembler) startup code
- * (+RO-CODE) ; Application RO code (.text)
- * (+RO-DATA) ; Application RO data (.constdata)
- * (InRoot$$Sections) ; All library code that must be in a root region
+ * (InRoot$$Sections) ; All (library) code that must be in a root region
}
- APP_DATA +0
- {
- * (+RW, +ZI) ; Application RW (.data) and ZI (.bss) data
- }
+ RO_CODE +0
+ { * (+RO-CODE) } ; Application RO code (.text)
- ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
- { }
+ RO_DATA +0
+ { * (+RO-DATA) } ; Application RO data (.constdata)
- ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
- { }
+ RW_DATA +0
+ { * (+RW) } ; Application RW data (.data)
- ;IRQ_STACK 0x800A0000 EMPTY 0x00010000 ; IRQ mode stack
- ;{ }
+ ZI_DATA +0
+ { * (+ZI) } ; Application ZI data (.bss)
- TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
- { }
+ ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
+ { }
+
+ ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
+ { }
+
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
+
+ TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
+ { }
}
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a8/ac6/example_build/sample_threadx/startup.S
index 376629d5..670fadb9 100644
--- a/ports/cortex_a8/ac6/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/startup.S
@@ -1,31 +1,31 @@
//----------------------------------------------------------------
-// Cortex-A8 Embedded example - Startup Code
+// ARMv7-A Embedded example - Startup Code
//
// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//----------------------------------------------------------------
-
// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
-#define Mode_USR 0x10
-#define Mode_FIQ 0x11
-#define Mode_IRQ 0x12
-#define Mode_SVC 0x13
-#define Mode_ABT 0x17
-#define Mode_UND 0x1B
-#define Mode_SYS 0x1F
+#define Mode_USR 0x10
+#define Mode_FIQ 0x11
+#define Mode_IRQ 0x12
+#define Mode_SVC 0x13
+#define Mode_ABT 0x17
+#define Mode_UND 0x1B
+#define Mode_SYS 0x1F
-#define I_Bit 0x80 // When I bit is set, IRQ is disabled
-#define F_Bit 0x40 // When F bit is set, FIQ is disabled
+#define I_Bit 0x80 // When I bit is set, IRQ is disabled
+#define F_Bit 0x40 // When F bit is set, FIQ is disabled
.section VECTORS, "ax"
.align 3
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
+
//----------------------------------------------------------------
// Entry point for the Reset handler
//----------------------------------------------------------------
@@ -39,36 +39,32 @@
// could also be used, unless the exception handlers are >32MB away.
Vectors:
- LDR PC, Reset_Addr
- LDR PC, Undefined_Addr
- LDR PC, SVC_Addr
- LDR PC, Prefetch_Addr
- LDR PC, Abort_Addr
- B . // Reserved vector
- LDR PC, IRQ_Addr
- LDR PC, FIQ_Addr
+ LDR PC, Reset_Addr
+ LDR PC, Undefined_Addr
+ LDR PC, SVC_Addr
+ LDR PC, Prefetch_Addr
+ LDR PC, Abort_Addr
+ LDR PC, Hypervisor_Addr
+ LDR PC, IRQ_Addr
+ LDR PC, FIQ_Addr
.balign 4
Reset_Addr:
.word Reset_Handler
Undefined_Addr:
- //.word Undefined_Handler
.word __tx_undefined
SVC_Addr:
- //.word SVC_Handler
.word __tx_swi_interrupt
Prefetch_Addr:
- //.word Prefetch_Handler
.word __tx_prefetch_handler
Abort_Addr:
- //.word Abort_Handler
.word __tx_abort_handler
+Hypervisor_Addr:
+ .word __tx_reserved_handler
IRQ_Addr:
- //.word IRQ_Handler
.word __tx_irq_handler
FIQ_Addr:
- //.word FIQ_Handler
.word __tx_fiq_handler
@@ -84,6 +80,8 @@ Prefetch_Handler:
B Prefetch_Handler
Abort_Handler:
B Abort_Handler
+Hypervisor_Handler:
+ B Hypervisor_Handler
IRQ_Handler:
B IRQ_Handler
FIQ_Handler:
@@ -96,7 +94,7 @@ FIQ_Handler:
Reset_Handler:
//----------------------------------------------------------------
-// Disable caches, MMU and branch prediction in case they were left enabled from an earlier run
+// Disable caches and MMU in case they were left enabled from an earlier run
// This does not need to be done from a cold reset
//----------------------------------------------------------------
@@ -104,13 +102,31 @@ Reset_Handler:
BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
- BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-// The MMU is enabled later, before calling main(). Caches and branch prediction are enabled inside main(),
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
// after the MMU has been enabled and scatterloading has been performed.
+//----------------------------------------------------------------
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
+ ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
+ MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
+ ISB
+
+//----------------------------------------------------------------
+// Invalidate Data and Instruction TLBs and branch predictor
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MOV r0,#0
+ MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
+ MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+
//----------------------------------------------------------------
// Initialize Supervisor Mode Stack
// Note stack must be 8 byte aligned.
@@ -119,13 +135,24 @@ Reset_Handler:
LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
//----------------------------------------------------------------
-// Invalidate Data and Instruction TLBs and branch predictor
+// Disable loop-buffer to fix errata on A15 r0p0
//----------------------------------------------------------------
-
- MOV r0,#0
- MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
- MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
-
+ MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+ MOV r1, r0, lsr #4 // Extract Primary Part Number
+ LDR r2, =0xFFF
+ AND r1, r1, r2
+ LDR r2, =0xC0F
+ CMP r1, r2 // Is this an A15?
+ BNE notA15r0p0 // Jump if not A15
+ AND r5, r0, #0x00f00000 // Variant
+ AND r6, r0, #0x0000000f // Revision
+ ORRS r6, r6, r5 // Combine variant and revision
+ BNE notA15r0p0 // Jump if not r0p0
+ MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+ ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+ MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+ ISB
+notA15r0p0:
//----------------------------------------------------------------
// Set Vector Base Address Register (VBAR) to point to this application's vector table
@@ -135,7 +162,9 @@ Reset_Handler:
MCR p15, 0, r0, c12, c0, 0
//----------------------------------------------------------------
-// Cache Invalidation code for Cortex-A8
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
+// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
//----------------------------------------------------------------
// Invalidate L1 Instruction Cache
@@ -153,7 +182,8 @@ Reset_Handler:
BEQ Finished // If 0, no need to clean
MOV r10, #0 // R10 holds current cache level << 1
-Loop1: ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
+Loop1:
+ ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
MOV r1, r0, LSR r2 // Bottom 3 bits are the Cache-type for this level
AND r1, r1, #7 // Isolate those lower 3 bits
CMP r1, #2
@@ -170,20 +200,25 @@ Loop1: ADD r2, r10, r10, LSR #1 // R2 holds cache "Set" position
LDR r7, =0x7FFF
ANDS r7, r7, r1, LSR #13 // R7 is the max number of the index size (right aligned)
-Loop2: MOV r9, r4 // R9 working copy of the max way size (right aligned)
+Loop2:
+ MOV r9, r4 // R9 working copy of the max way size (right aligned)
-Loop3: ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
+Loop3:
+ ORR r11, r10, r9, LSL r5 // Factor in the Way number and cache number into R11
ORR r11, r11, r7, LSL r2 // Factor in the Set number
MCR p15, 0, r11, c7, c6, 2 // Invalidate by Set/Way
SUBS r9, r9, #1 // Decrement the Way number
BGE Loop3
SUBS r7, r7, #1 // Decrement the Set number
BGE Loop2
-Skip: ADD r10, r10, #2 // Increment the cache number
+Skip:
+ ADD r10, r10, #2 // Increment the cache number
CMP r3, r10
BGT Loop1
Finished:
+
+
//----------------------------------------------------------------
// MMU Configuration
// Set translation table base
@@ -196,12 +231,17 @@ Finished:
MOV r0,#0x0
MCR p15, 0, r0, c2, c0, 2
-
// write the address of our page table base to TTB register 0
-
LDR r0,=Image$$TTB$$ZI$$Base
+
+ MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
+ // S=0 (translation table walk to non-shared memory)
+ ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
+
+ ORR r0,r0,r1
+
MCR p15, 0, r0, c2, c0, 0
- //MSR TTBR0, r0
+
//----------------------------------------------------------------
// PAGE TABLE generation
@@ -227,50 +267,41 @@ Finished:
// XN[4]=1 - Execute never on Strongly-ordered memory
// Bits[1:0]=10 - Indicate entry is a 1MB section
//----------------------------------------------------------------
-
- LDR r1,=0xfff // Loop counter
- LDR r2,=3554
+ LDR r0,=Image$$TTB$$ZI$$Base
+ LDR r1,=0xfff // loop counter
+ LDR r2,=0b00000000000000000000110111100010
// r0 contains the address of the translation table base
// r1 is loop counter
// r2 is level1 descriptor (bits 19:0)
- // Use loop counter to create 4096 individual table entries.
- // This writes from address 'Image$$TTB$$ZI$$Base' +
- // Offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
+ // use loop counter to create 4096 individual table entries.
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
init_ttb_1:
ORR r3, r2, r1, LSL#20 // R3 now contains full level1 descriptor to write
- ORR r3, r3, #16 // Set XN bit
+ ORR r3, r3, #0b0000000010000 // Set XN bit
STR r3, [r0, r1, LSL#2] // Str table entry at TTB base + loopcount*4
SUBS r1, r1, #1 // Decrement loop counter
BPL init_ttb_1
- // In this example, the 1MB section based at '||Image$$APP_CODE$$Base||' is setup specially as cacheable (write back mode).
- // TEX[14:12]=000 and CB[3:2]= 11, Outer and inner write back, no Write-allocate normal memory.
-
- LDR r1,=Image$$APP_CODE$$Base // Base physical address of code segment
+ // In this example, the 1MB section based at '__code_start' is setup specially as cacheable (write back mode).
+ // TEX[14:12]=001 and CB[3:2]= 11, Outer and inner write back, write allocate normal memory.
+ LDR r1,=Image$$VECTORS$$Base // Base physical address of code segment
LSR r1, #20 // Shift right to align to 1MB boundaries
ORR r3, r2, r1, LSL#20 // Setup the initial level1 descriptor again
- ORR r3, r3, #12 // Set CB bits
+ ORR r3, r3, #0b0000000001100 // Set CB bits
+ ORR r3, r3, #0b1000000000000 // Set TEX bit 12
STR r3, [r0, r1, LSL#2] // str table entry
-
+
//----------------------------------------------------------------
// Setup domain control register - Enable all domains to client mode
//----------------------------------------------------------------
- MRC p15, 0, r0, c3, c0, 0 // Read Domain Access Control Register
- LDR r0, =0x55555555 // Initialize every domain entry to b01 (client)
- MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
-
-//----------------------------------------------------------------
-// Setup L2 Cache - L2 Cache Auxiliary Control
-//----------------------------------------------------------------
-
-//// Seems to undef on Beagle ?
-//// MOV r0, #0
-//// MCR p15, 1, r0, c9, c0, 2 // Write L2 Auxilary Control Register
-
+ MRC p15, 0, r0, c3, c0, 0 // Read Domain Access Control Register
+ LDR r0, =0x55555555 // Initialize every domain entry to b01 (client)
+ MCR p15, 0, r0, c3, c0, 0 // Write Domain Access Control Register
#if defined(__ARM_NEON) || defined(__ARM_FP)
//----------------------------------------------------------------
@@ -288,17 +319,16 @@ init_ttb_1:
//----------------------------------------------------------------
MOV r0, #0x40000000
- VMSR FPEXC, r0 // Write FPEXC register, EN bit set
+ VMSR FPEXC, r0 // Write FPEXC register, EN bit set
#endif
//----------------------------------------------------------------
-// Enable MMU and Branch to __main
+// Enable MMU and branch to __main
// Leaving the caches disabled until after scatter loading.
//----------------------------------------------------------------
- LDR r12,=__main // Save this in register for possible long jump
-
+ LDR r12,=__main
MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
@@ -308,15 +338,14 @@ init_ttb_1:
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-
-// Now the MMU is enabled, virtual to physical address translations will occur.
-// This will affect the next instruction fetches.
+// Now the MMU is enabled, virtual to physical address translations will occur. This will affect the next
+// instruction fetch.
//
// The two instructions currently in the pipeline will have been fetched before the MMU was enabled.
// The branch to __main is safe because the Virtual Address (VA) is the same as the Physical Address (PA)
// (flat mapping) of this code that enables the MMU and performs the branch
- BX r12 // Branch to __main() C library entry point
+ BX r12 // Branch to __main C library entry point
@@ -344,21 +373,15 @@ enable_caches:
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-
-//----------------------------------------------------------------
-// Enable Cortex-A8 Level2 Unified Cache
-//----------------------------------------------------------------
-
MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
ORR r0, #2 // L2EN bit, enable L2 cache
+ ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
ISB
BX lr
-
.cfi_endproc
-
.global disable_caches
.type disable_caches, "function"
disable_caches:
@@ -371,3 +394,4 @@ disable_caches:
BX lr
+
diff --git a/ports/cortex_a8/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a8/ac6/example_build/sample_threadx/tx_initialize_low_level.S
index aa4bee84..715958f0 100644
--- a/ports/cortex_a8/ac6/example_build/sample_threadx/tx_initialize_low_level.S
+++ b/ports/cortex_a8/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -1,345 +1,299 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+ applications calling this function from to 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =Image$$ARM_LIB_STACK$$Base @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =Image$$APP_DATA$$ZI$$Limit @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
- .global __tx_irq_processing_return
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
.type __tx_irq_processing_return,function
.global __tx_irq_handler
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* Jump to context restore to restore system context. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
-
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a8/ac6/example_build/tx/.cproject b/ports/cortex_a8/ac6/example_build/tx/.cproject
index 56b8532d..3d1f0818 100644
--- a/ports/cortex_a8/ac6/example_build/tx/.cproject
+++ b/ports/cortex_a8/ac6/example_build/tx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,37 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -111,7 +111,7 @@
-
+
@@ -123,6 +123,10 @@
+
+
+
+
@@ -138,9 +142,5 @@
-
-
-
-
diff --git a/ports/cortex_a8/ac6/inc/tx_port.h b/ports/cortex_a8/ac6/inc/tx_port.h
index 67055343..19463de1 100644
--- a/ports/cortex_a8/ac6/inc/tx_port.h
+++ b/ports/cortex_a8/ac6/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A8/AC6 */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A8. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A8/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a8/ac6/src/tx_thread_context_restore.S b/ports/cortex_a8/ac6/src/tx_thread_context_restore.S
index ce46a277..fae7e72d 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_context_restore.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_context_restore.S
@@ -1,259 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Recover the saved context and return to the point of interrupt. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a8/ac6/src/tx_thread_context_save.S b/ports/cortex_a8/ac6/src/tx_thread_context_save.S
index 52408580..7ac48c2e 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_context_save.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_context_save.S
@@ -1,205 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_irq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a8/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a8/ac6/src/tx_thread_fiq_context_restore.S
index c5d6abc8..006be973 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -45,218 +34,190 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a8/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a8/ac6/src/tx_thread_fiq_context_save.S
index fa7cb043..7db6a4c2 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_fiq_context_save.S
@@ -1,206 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+    STMDB   sp!, {r2, lr}                   // Store other registers. Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_end.S
index 18375623..b34d881e 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #FIQ_MODE_BITS // Build FIQ mode CPSR
+ MSR CPSR_c, r0 // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_start.S
index 54bc9312..c9cd5a06 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a8/ac6/src/tx_thread_interrupt_control.S
index d6a6c3e3..63b1609a 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a8/ac6/src/tx_thread_interrupt_disable.S
index 984ea3c5..13258808 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a8/ac6/src/tx_thread_interrupt_restore.S
index 563eaa2d..2d582511 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_end.S
index 5e152c09..ec7e63c6 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_start.S
index 27836bec..c69976ed 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+    MSR     CPSR_c, r0                      // Enable IRQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_schedule.S b/ports/cortex_a8/ac6/src/tx_thread_schedule.S
index 31e8bb67..8330e9df 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_schedule.S
@@ -1,257 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_stack_build.S b/ports/cortex_a8/ac6/src/tx_thread_stack_build.S
index 506a0e3d..f413e673 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_stack_build.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A8 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+         pc    (r15)         Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a8/ac6/src/tx_thread_system_return.S b/ports/cortex_a8/ac6/src/tx_thread_system_return.S
index 4c925246..cb7d62ce 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_system_return.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_system_return.S
@@ -1,182 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a8/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a8/ac6/src/tx_thread_vectored_context_save.S
index a149643b..d846223f 100644
--- a/ports/cortex_a8/ac6/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a8/ac6/src/tx_thread_vectored_context_save.S
@@ -1,192 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A8/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a8/ac6/src/tx_timer_interrupt.S b/ports/cortex_a8/ac6/src/tx_timer_interrupt.S
index 9529953d..7337ed0c 100644
--- a/ports/cortex_a8/ac6/src/tx_timer_interrupt.S
+++ b/ports/cortex_a8/ac6/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A8/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                            // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a8/gnu/example_build/build_threadx_sample.bat
index aa8c8006..725aeda9 100644
--- a/ports/cortex_a8/gnu/example_build/build_threadx_sample.bat
+++ b/ports/cortex_a8/gnu/example_build/build_threadx_sample.bat
@@ -2,5 +2,5 @@ arm-none-eabi-gcc -c -g -mcpu=cortex-a8 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a8 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a8 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a8 -I../../../../common/inc -I../inc sample_threadx.c
-arm-none-eabi-ld -A cortex-a8 -T sample_threadx.ld reset.o crt0.o tx_initialize_low_level.o sample_threadx.o tx.a libc.a libgcc.a -o sample_threadx.out -M > sample_threadx.map
+arm-none-eabi-gcc -g -mcpu=cortex-a8 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
diff --git a/ports/cortex_a8/gnu/example_build/crt0.S b/ports/cortex_a8/gnu/example_build/crt0.S
index aa0f3239..56b6c958 100644
--- a/ports/cortex_a8/gnu/example_build/crt0.S
+++ b/ports/cortex_a8/gnu/example_build/crt0.S
@@ -26,13 +26,13 @@ _mainCRTStartup:
mov a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
-
- ldr a1, .LC1 /* First arg: start of memory block */
- ldr a3, .LC2
- sub a3, a3, a1 /* Third arg: length of block */
-
-
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
bl memset
mov r0, #0 /* no arguments */
mov r1, #0 /* no argv either */
@@ -48,15 +48,15 @@ _mainCRTStartup:
/* bl init */
mov r0, r4
mov r1, r5
-#endif
+#endif
bl main
bl exit /* Should not return. */
-
- /* For Thumb, constants must be after the code since only
+
+ /* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
-
+
.align 0
.LC0:
.LC1:
diff --git a/ports/cortex_a8/gnu/example_build/reset.S b/ports/cortex_a8/gnu/example_build/reset.S
index 856e31eb..597e9d9a 100644
--- a/ports/cortex_a8/gnu/example_build/reset.S
+++ b/ports/cortex_a8/gnu/example_build/reset.S
@@ -1,35 +1,24 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
@@ -41,36 +30,35 @@
.global __tx_reserved_handler
.global __tx_irq_handler
.global __tx_fiq_handler
-@
-@
-@/* Define the vector area. This should be located or copied to 0. */
-@
+
+/* Define the vector area. This should be located or copied to 0. */
+
.text
.global __vectors
__vectors:
- LDR pc, STARTUP @ Reset goes to startup function
- LDR pc, UNDEFINED @ Undefined handler
- LDR pc, SWI @ Software interrupt handler
- LDR pc, PREFETCH @ Prefetch exception handler
- LDR pc, ABORT @ Abort exception handler
- LDR pc, RESERVED @ Reserved exception handler
- LDR pc, IRQ @ IRQ interrupt handler
- LDR pc, FIQ @ FIQ interrupt handler
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
STARTUP:
- .word _start @ Reset goes to C startup function
+ .word _start // Reset goes to C startup function
UNDEFINED:
- .word __tx_undefined @ Undefined handler
+ .word __tx_undefined // Undefined handler
SWI:
- .word __tx_swi_interrupt @ Software interrupt handler
+ .word __tx_swi_interrupt // Software interrupt handler
PREFETCH:
- .word __tx_prefetch_handler @ Prefetch exception handler
-ABORT:
- .word __tx_abort_handler @ Abort exception handler
-RESERVED:
- .word __tx_reserved_handler @ Reserved exception handler
-IRQ:
- .word __tx_irq_handler @ IRQ interrupt handler
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
FIQ:
- .word __tx_fiq_handler @ FIQ interrupt handler
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a8/gnu/example_build/sample_threadx.c b/ports/cortex_a8/gnu/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a8/gnu/example_build/sample_threadx.c
+++ b/ports/cortex_a8/gnu/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a8/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a8/gnu/example_build/tx_initialize_low_level.S
index 82bf7825..7de5d3ce 100644
--- a/ports/cortex_a8/gnu/example_build/tx_initialize_low_level.S
+++ b/ports/cortex_a8/gnu/example_build/tx_initialize_low_level.S
@@ -1,47 +1,35 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
@@ -51,297 +39,267 @@ SYS_STACK_SIZE = 1024 @ System stack size
.global _sp
.global _stack_bottom
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =_sp @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =_stack_bottom @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =_end @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_handler
- .global __tx_irq_processing_return
+ .global __tx_irq_processing_return
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+//
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns processing in IRQ mode with interrupts disabled.  */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns processing in IRQ mode with interrupts disabled.  */
+
+
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a8/gnu/inc/tx_port.h b/ports/cortex_a8/gnu/inc/tx_port.h
index d721109b..19463de1 100644
--- a/ports/cortex_a8/gnu/inc/tx_port.h
+++ b/ports/cortex_a8/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A8/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A8. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A8/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a8/gnu/src/tx_thread_context_restore.S b/ports/cortex_a8/gnu/src/tx_thread_context_restore.S
index 685844fc..fae7e72d 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_context_restore.S
@@ -1,260 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
- .global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+    /* Restore interrupted thread or ISR. */
+
+    /* Pickup the saved stack pointer. */
+
+    /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a8/gnu/src/tx_thread_context_save.S b/ports/cortex_a8/gnu/src/tx_thread_context_save.S
index ed2232e9..7ac48c2e 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_context_save.S
@@ -1,206 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
- .global _tx_irq_processing_return
- .global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a8/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a8/gnu/src/tx_thread_fiq_context_restore.S
index 87488b7e..006be973 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -46,218 +35,189 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a8/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a8/gnu/src/tx_thread_fiq_context_save.S
index a0820138..7db6a4c2 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_fiq_context_save.S
@@ -1,207 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
.global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+    /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+       out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_end.S
index 5667d51c..b34d881e 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+    ORR     r0, r0, #FIQ_MODE_BITS          // Build FIQ mode CPSR
+    MSR     CPSR_c, r0                      // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_start.S
index ddda6089..c9cd5a06 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+    MSR     CPSR_c, r0                      // Enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a8/gnu/src/tx_thread_interrupt_control.S
index 7f3c0f78..63b1609a 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a8/gnu/src/tx_thread_interrupt_disable.S
index 3bce811f..13258808 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a8/gnu/src/tx_thread_interrupt_restore.S
index 4efdfb75..2d582511 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_end.S
index 473348d7..ec7e63c6 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_start.S
index 766e83d3..c69976ed 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enable IRQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_schedule.S b/ports/cortex_a8/gnu/src/tx_thread_schedule.S
index 642e1989..8330e9df 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_schedule.S
@@ -1,258 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
- .global _tx_execution_thread_enter
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_stack_build.S b/ports/cortex_a8/gnu/src/tx_thread_stack_build.S
index 3946d652..f413e673 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A8 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a8/gnu/src/tx_thread_system_return.S b/ports/cortex_a8/gnu/src/tx_thread_system_return.S
index 05fe5793..cb7d62ce 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_system_return.S
@@ -1,183 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
- .global _tx_execution_thread_exit
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a8/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a8/gnu/src/tx_thread_vectored_context_save.S
index a11ac4b4..d846223f 100644
--- a/ports/cortex_a8/gnu/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a8/gnu/src/tx_thread_vectored_context_save.S
@@ -1,193 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_execution_isr_enter
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A8/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a8/gnu/src/tx_timer_interrupt.S b/ports/cortex_a8/gnu/src/tx_timer_interrupt.S
index 2f8ffccf..7337ed0c 100644
--- a/ports/cortex_a8/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a8/gnu/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A8/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                            // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx.c b/ports/cortex_a9/ac6/example_build/sample_threadx.c
new file mode 100644
index 00000000..8c61de06
--- /dev/null
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx.c
@@ -0,0 +1,369 @@
+/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ byte pool, and block pool. */
+
+#include "tx_api.h"
+
+#define DEMO_STACK_SIZE 1024
+#define DEMO_BYTE_POOL_SIZE 9120
+#define DEMO_BLOCK_POOL_SIZE 100
+#define DEMO_QUEUE_SIZE 100
+
+
+/* Define the ThreadX object control blocks... */
+
+TX_THREAD thread_0;
+TX_THREAD thread_1;
+TX_THREAD thread_2;
+TX_THREAD thread_3;
+TX_THREAD thread_4;
+TX_THREAD thread_5;
+TX_THREAD thread_6;
+TX_THREAD thread_7;
+TX_QUEUE queue_0;
+TX_SEMAPHORE semaphore_0;
+TX_MUTEX mutex_0;
+TX_EVENT_FLAGS_GROUP event_flags_0;
+TX_BYTE_POOL byte_pool_0;
+TX_BLOCK_POOL block_pool_0;
+
+
+/* Define the counters used in the demo application... */
+
+ULONG thread_0_counter;
+ULONG thread_1_counter;
+ULONG thread_1_messages_sent;
+ULONG thread_2_counter;
+ULONG thread_2_messages_received;
+ULONG thread_3_counter;
+ULONG thread_4_counter;
+ULONG thread_5_counter;
+ULONG thread_6_counter;
+ULONG thread_7_counter;
+
+
+/* Define thread prototypes. */
+
+void thread_0_entry(ULONG thread_input);
+void thread_1_entry(ULONG thread_input);
+void thread_2_entry(ULONG thread_input);
+void thread_3_and_4_entry(ULONG thread_input);
+void thread_5_entry(ULONG thread_input);
+void thread_6_and_7_entry(ULONG thread_input);
+
+
+/* Define main entry point. */
+
+int main()
+{
+
+ /* Enter the ThreadX kernel. */
+ tx_kernel_enter();
+}
+
+
+/* Define what the initial system looks like. */
+
+void tx_application_define(void *first_unused_memory)
+{
+
+CHAR *pointer = TX_NULL;
+
+
+ /* Create a byte memory pool from which to allocate the thread stacks. */
+ tx_byte_pool_create(&byte_pool_0, "byte pool 0", first_unused_memory, DEMO_BYTE_POOL_SIZE);
+
+ /* Put system definition stuff in here, e.g. thread creates and other assorted
+ create information. */
+
+ /* Allocate the stack for thread 0. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create the main thread. */
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
+ 1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+
+ /* Allocate the stack for thread 1. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
+ message queue. It is also interesting to note that these threads have a time
+ slice. */
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 2. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
+ 16, 16, 4, TX_AUTO_START);
+
+ /* Allocate the stack for thread 3. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ An interesting thing here is that both threads share the same instruction area. */
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 4. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 5. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create thread 5. This thread simply pends on an event flag which will be set
+ by thread_0. */
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
+ 4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 6. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ /* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the stack for thread 7. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
+
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
+ 8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
+
+ /* Allocate the message queue. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_QUEUE_SIZE*sizeof(ULONG), TX_NO_WAIT);
+
+ /* Create the message queue shared by threads 1 and 2. */
+ tx_queue_create(&queue_0, "queue 0", TX_1_ULONG, pointer, DEMO_QUEUE_SIZE*sizeof(ULONG));
+
+ /* Create the semaphore used by threads 3 and 4. */
+ tx_semaphore_create(&semaphore_0, "semaphore 0", 1);
+
+ /* Create the event flags group used by threads 1 and 5. */
+ tx_event_flags_create(&event_flags_0, "event flags 0");
+
+ /* Create the mutex used by thread 6 and 7 without priority inheritance. */
+ tx_mutex_create(&mutex_0, "mutex 0", TX_NO_INHERIT);
+
+ /* Allocate the memory for a small block pool. */
+ tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_BLOCK_POOL_SIZE, TX_NO_WAIT);
+
+ /* Create a block memory pool to allocate a message buffer from. */
+ tx_block_pool_create(&block_pool_0, "block pool 0", sizeof(ULONG), pointer, DEMO_BLOCK_POOL_SIZE);
+
+ /* Allocate a block and release the block memory. */
+ tx_block_allocate(&block_pool_0, (VOID **) &pointer, TX_NO_WAIT);
+
+ /* Release the block back to the pool. */
+ tx_block_release(pointer);
+}
+
+
+
+/* Define the test threads. */
+
+void thread_0_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sits in while-forever-sleep loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_0_counter++;
+
+ /* Sleep for 10 ticks. */
+ tx_thread_sleep(10);
+
+ /* Set event flag 0 to wakeup thread 5. */
+ status = tx_event_flags_set(&event_flags_0, 0x1, TX_OR);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_1_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+ /* This thread simply sends messages to a queue shared by thread 2. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_1_counter++;
+
+ /* Send message to queue 0. */
+ status = tx_queue_send(&queue_0, &thread_1_messages_sent, TX_WAIT_FOREVER);
+
+ /* Check completion status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Increment the message sent. */
+ thread_1_messages_sent++;
+ }
+}
+
+
+void thread_2_entry(ULONG thread_input)
+{
+
+ULONG received_message;
+UINT status;
+
+ /* This thread retrieves messages placed on the queue by thread 1. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_2_counter++;
+
+ /* Retrieve a message from the queue. */
+ status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
+
+ /* Check completion status and make sure the message is what we
+ expected. */
+ if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
+ break;
+
+ /* Otherwise, all is okay. Increment the received message count. */
+ thread_2_messages_received++;
+ }
+}
+
+
+void thread_3_and_4_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This function is executed from thread 3 and thread 4.  As the loop
+       below shows, these functions compete for ownership of semaphore_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 3)
+ thread_3_counter++;
+ else
+ thread_4_counter++;
+
+ /* Get the semaphore with suspension. */
+ status = tx_semaphore_get(&semaphore_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the semaphore. */
+ tx_thread_sleep(2);
+
+ /* Release the semaphore. */
+ status = tx_semaphore_put(&semaphore_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
+
+
+void thread_5_entry(ULONG thread_input)
+{
+
+UINT status;
+ULONG actual_flags;
+
+
+ /* This thread simply waits for an event in a forever loop. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ thread_5_counter++;
+
+ /* Wait for event flag 0. */
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ &actual_flags, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if ((status != TX_SUCCESS) || (actual_flags != 0x1))
+ break;
+ }
+}
+
+
+void thread_6_and_7_entry(ULONG thread_input)
+{
+
+UINT status;
+
+
+    /* This function is executed from thread 6 and thread 7.  As the loop
+       below shows, these functions compete for ownership of mutex_0. */
+ while(1)
+ {
+
+ /* Increment the thread counter. */
+ if (thread_input == 6)
+ thread_6_counter++;
+ else
+ thread_7_counter++;
+
+ /* Get the mutex with suspension. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Get the mutex again with suspension. This shows
+ that an owning thread may retrieve the mutex it
+ owns multiple times. */
+ status = tx_mutex_get(&mutex_0, TX_WAIT_FOREVER);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Sleep for 2 ticks to hold the mutex. */
+ tx_thread_sleep(2);
+
+ /* Release the mutex. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+
+ /* Release the mutex again. This will actually
+ release ownership since it was obtained twice. */
+ status = tx_mutex_put(&mutex_0);
+
+ /* Check status. */
+ if (status != TX_SUCCESS)
+ break;
+ }
+}
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/.cproject b/ports/cortex_a9/ac6/example_build/sample_threadx/.cproject
index da15bba5..72d51c5b 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/.cproject
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,37 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
+
+
+
+
-
+
@@ -115,7 +119,7 @@
-
+
@@ -135,7 +139,7 @@
-
+
@@ -149,8 +153,6 @@
-
-
@@ -166,5 +168,9 @@
+
+
+
+
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/.project b/ports/cortex_a9/ac6/example_build/sample_threadx/.project
index 2a6b3cb1..ed4c0885 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/.project
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/.project
@@ -20,7 +20,6 @@
- com.arm.debug.ds.natureorg.eclipse.cdt.core.cnatureorg.eclipse.cdt.managedbuilder.core.managedBuildNatureorg.eclipse.cdt.managedbuilder.core.ScannerConfigNature
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.c b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.c
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.launch b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.launch
new file mode 100644
index 00000000..d4805aef
--- /dev/null
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.launch
@@ -0,0 +1,188 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.scat b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.scat
index 71410dbc..d23881cd 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.scat
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/sample_threadx.scat
@@ -5,12 +5,10 @@
; and your compliance with all applicable terms and conditions of such licence agreement.
;*******************************************************
-; Scatter-file for Cortex-A9 bare-metal example on Versatile Express
+; Scatter-file for ARMv7-A bare-metal example on Versatile Express
; This scatter-file places application code, data, stack and heap at suitable addresses in the memory map.
-; Versatile Express with Cortex-A9 has 1GB SDRAM at 0x60000000 to 0x9FFFFFFF, which this scatter-file uses.
-
SDRAM 0x80000000 0x20000000
{
@@ -35,11 +33,11 @@ SDRAM 0x80000000 0x20000000
ARM_LIB_HEAP 0x80040000 EMPTY 0x00040000 ; Application heap
{ }
- ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
+ ARM_LIB_STACK 0x80090000 EMPTY 0x00010000 ; Application (SVC mode) stack
{ }
- ;IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
- ;{ }
+; IRQ_STACK 0x800A0000 EMPTY -0x00010000 ; IRQ mode stack
+; { }
TTB 0x80100000 EMPTY 0x4000 ; Level-1 Translation Table for MMU
{ }
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/startup.S b/ports/cortex_a9/ac6/example_build/sample_threadx/startup.S
index 76ad6539..670fadb9 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/startup.S
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/startup.S
@@ -1,13 +1,12 @@
//----------------------------------------------------------------
-// Cortex-A9 Embedded example - Startup Code
+// ARMv7-A Embedded example - Startup Code
//
// Copyright (c) 2005-2018 Arm Limited (or its affiliates). All rights reserved.
// Use, modification and redistribution of this file is subject to your possession of a
-// valid End User License Agreement for the Arm Product of which these examples are part of
+// valid End User License Agreement for the Arm Product of which these examples are part of
// and your compliance with all applicable terms and conditions of such licence agreement.
//----------------------------------------------------------------
-
// Standard definitions of mode bits and interrupt (I & F) flags in PSRs
#define Mode_USR 0x10
@@ -26,7 +25,7 @@
.align 3
.cfi_sections .debug_frame // put stack frame info into .debug_frame instead of .eh_frame
-
+
//----------------------------------------------------------------
// Entry point for the Reset handler
//----------------------------------------------------------------
@@ -45,7 +44,7 @@ Vectors:
LDR PC, SVC_Addr
LDR PC, Prefetch_Addr
LDR PC, Abort_Addr
- B . // Reserved vector
+ LDR PC, Hypervisor_Addr
LDR PC, IRQ_Addr
LDR PC, FIQ_Addr
@@ -54,22 +53,18 @@ Vectors:
Reset_Addr:
.word Reset_Handler
Undefined_Addr:
- //.word Undefined_Handler
.word __tx_undefined
SVC_Addr:
- //.word SVC_Handler
.word __tx_swi_interrupt
Prefetch_Addr:
- //.word Prefetch_Handler
.word __tx_prefetch_handler
Abort_Addr:
- //.word Abort_Handler
.word __tx_abort_handler
+Hypervisor_Addr:
+ .word __tx_reserved_handler
IRQ_Addr:
- //.word IRQ_Handler
.word __tx_irq_handler
FIQ_Addr:
- //.word FIQ_Handler
.word __tx_fiq_handler
@@ -85,21 +80,21 @@ Prefetch_Handler:
B Prefetch_Handler
Abort_Handler:
B Abort_Handler
+Hypervisor_Handler:
+ B Hypervisor_Handler
IRQ_Handler:
B IRQ_Handler
FIQ_Handler:
B FIQ_Handler
-
-
//----------------------------------------------------------------
// Reset Handler
//----------------------------------------------------------------
Reset_Handler:
//----------------------------------------------------------------
-// Disable caches, MMU and branch prediction in case they were left enabled from an earlier run
+// Disable caches and MMU in case they were left enabled from an earlier run
// This does not need to be done from a cold reset
//----------------------------------------------------------------
@@ -107,13 +102,31 @@ Reset_Handler:
BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
BIC r0, r0, #0x1 // Clear M bit 0 to disable MMU
- BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
+ BIC r0, r0, #(0x1 << 11) // Clear Z bit 11 to disable branch prediction
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-// The MMU is enabled later, before calling main(). Caches and branch prediction are enabled inside main(),
+// The MMU is enabled later, before calling main(). Caches are enabled inside main(),
// after the MMU has been enabled and scatterloading has been performed.
+//----------------------------------------------------------------
+// ACTLR.SMP bit must be set before the caches and MMU are enabled,
+// or any cache and TLB maintenance operations are performed, even for single-core
+//----------------------------------------------------------------
+ MRC p15, 0, r0, c1, c0, 1 // Read ACTLR
+ ORR r0, r0, #(1 << 6) // Set ACTLR.SMP bit
+ MCR p15, 0, r0, c1, c0, 1 // Write ACTLR
+ ISB
+
+//----------------------------------------------------------------
+// Invalidate Data and Instruction TLBs and branch predictor
+// This does not need to be done from a cold reset
+//----------------------------------------------------------------
+
+ MOV r0,#0
+ MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
+ MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+
//----------------------------------------------------------------
// Initialize Supervisor Mode Stack
// Note stack must be 8 byte aligned.
@@ -122,12 +135,24 @@ Reset_Handler:
LDR SP, =Image$$ARM_LIB_STACK$$ZI$$Limit
//----------------------------------------------------------------
-// Invalidate Data and Instruction TLBs and branch predictor
+// Disable loop-buffer to fix errata on A15 r0p0
//----------------------------------------------------------------
-
- MOV r0,#0
- MCR p15, 0, r0, c8, c7, 0 // I-TLB and D-TLB invalidation
- MCR p15, 0, r0, c7, c5, 6 // BPIALL - Invalidate entire branch predictor array
+ MRC p15, 0, r0, c0, c0, 0 // Read main ID register MIDR
+ MOV r1, r0, lsr #4 // Extract Primary Part Number
+ LDR r2, =0xFFF
+ AND r1, r1, r2
+ LDR r2, =0xC0F
+ CMP r1, r2 // Is this an A15?
+ BNE notA15r0p0 // Jump if not A15
+ AND r5, r0, #0x00f00000 // Variant
+ AND r6, r0, #0x0000000f // Revision
+ ORRS r6, r6, r5 // Combine variant and revision
+ BNE notA15r0p0 // Jump if not r0p0
+ MRC p15, 0, r0, c1, c0, 1 // Read Aux Ctrl Reg
+ ORR r0, r0, #(1 << 1) // Set bit 1 to Disable Loop Buffer
+ MCR p15, 0, r0, c1, c0, 1 // Write Aux Ctrl Reg
+ ISB
+notA15r0p0:
//----------------------------------------------------------------
// Set Vector Base Address Register (VBAR) to point to this application's vector table
@@ -137,7 +162,9 @@ Reset_Handler:
MCR p15, 0, r0, c12, c0, 0
//----------------------------------------------------------------
-// Cache Invalidation code for Cortex-A9
+// Cache Invalidation code for ARMv7-A
+// The caches, MMU and BTB do not need post-reset invalidation on Cortex-A7,
+// but forcing a cache invalidation makes the code more portable to other CPUs (e.g. Cortex-A9)
//----------------------------------------------------------------
// Invalidate L1 Instruction Cache
@@ -206,16 +233,18 @@ Finished:
// write the address of our page table base to TTB register 0
LDR r0,=Image$$TTB$$ZI$$Base
+
MOV r1, #0x08 // RGN=b01 (outer cacheable write-back cached, write allocate)
// S=0 (translation table walk to non-shared memory)
ORR r1,r1,#0x40 // IRGN=b01 (inner cacheability for the translation table walk is Write-back Write-allocate)
ORR r0,r0,r1
+
MCR p15, 0, r0, c2, c0, 0
//----------------------------------------------------------------
-// PAGE TABLE generation
+// PAGE TABLE generation
// Generate the page tables
// Build a flat translation table for the whole address space.
@@ -228,7 +257,7 @@ Finished:
// Bits[31:20] - Top 12 bits of VA is pointer into table
// nG[17]=0 - Non global, enables matching against ASID in the TLB when set.
// S[16]=0 - Indicates normal memory is shared when set.
-// AP2[15]=0
+// AP2[15]=0
// AP[11:10]=11 - Configure for full read/write access in all modes
// TEX[14:12]=000
// CB[3:2]= 00 - Set attributes to Strongly-ordered memory.
@@ -247,7 +276,7 @@ Finished:
// r2 is level1 descriptor (bits 19:0)
// use loop counter to create 4096 individual table entries.
- // this writes from address 'Image$$TTB$$ZI$$Base' +
+ // this writes from address 'Image$$TTB$$ZI$$Base' +
// offset 0x3FFC down to offset 0x0 in word steps (4 bytes)
init_ttb_1:
@@ -344,12 +373,8 @@ enable_caches:
MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
ISB
-
-//----------------------------------------------------------------
-// Enable L1 D-side prefetch (A9 specific)
-//----------------------------------------------------------------
-
MRC p15, 0, r0, c1, c0, 1 // Read Auxiliary Control Register
+ ORR r0, #2 // L2EN bit, enable L2 cache
ORR r0, r0, #(0x1 << 2) // Set DP bit 2 to enable L1 Dside prefetch
MCR p15, 0, r0, c1, c0, 1 // Write Auxiliary Control Register
ISB
@@ -357,3 +382,16 @@ enable_caches:
BX lr
.cfi_endproc
+ .global disable_caches
+ .type disable_caches, "function"
+disable_caches:
+
+ MRC p15, 0, r0, c1, c0, 0 // Read System Control Register
+ BIC r0, r0, #(0x1 << 12) // Clear I bit 12 to disable I Cache
+ BIC r0, r0, #(0x1 << 2) // Clear C bit 2 to disable D Cache
+ MCR p15, 0, r0, c1, c0, 0 // Write System Control Register
+ ISB
+
+ BX lr
+
+
diff --git a/ports/cortex_a9/ac6/example_build/sample_threadx/tx_initialize_low_level.S b/ports/cortex_a9/ac6/example_build/sample_threadx/tx_initialize_low_level.S
index e3907d1e..715958f0 100644
--- a/ports/cortex_a9/ac6/example_build/sample_threadx/tx_initialize_low_level.S
+++ b/ports/cortex_a9/ac6/example_build/sample_threadx/tx_initialize_low_level.S
@@ -1,345 +1,299 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
.global _tx_thread_context_restore
.global _tx_timer_interrupt
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMV7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =Image$$ARM_LIB_STACK$$ZI$$Limit // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =Image$$ARM_LIB_STACK$$Base @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =Image$$ZI_DATA$$ZI$$Limit @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =Image$$ARM_LIB_STACK$$Base // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =Image$$ZI_DATA$$ZI$$Limit // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
- .global __tx_irq_processing_return
+ B __tx_reserved_handler // Reserved exception handler
+
+ .global __tx_irq_processing_return
.type __tx_irq_processing_return,function
.global __tx_irq_handler
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+ This routine returns processing in IRQ mode with interrupts disabled. */
+
+ /* Jump to context restore to restore system context. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
-
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a9/ac6/example_build/tx/.cproject b/ports/cortex_a9/ac6/example_build/tx/.cproject
index 40ccf832..52a6e44b 100644
--- a/ports/cortex_a9/ac6/example_build/tx/.cproject
+++ b/ports/cortex_a9/ac6/example_build/tx/.cproject
@@ -3,9 +3,9 @@
-
+
-
+
@@ -23,37 +23,37 @@
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
-
+
@@ -111,7 +111,7 @@
-
+
@@ -123,6 +123,10 @@
+
+
+
+
@@ -138,9 +142,5 @@
-
-
-
-
diff --git a/ports/cortex_a9/ac6/inc/tx_port.h b/ports/cortex_a9/ac6/inc/tx_port.h
index e685bc6f..19463de1 100644
--- a/ports/cortex_a9/ac6/inc/tx_port.h
+++ b/ports/cortex_a9/ac6/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A9/AC6 */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two item are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A9. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A9/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a9/ac6/src/tx_thread_context_restore.S b/ports/cortex_a9/ac6/src/tx_thread_context_restore.S
index 1385cff1..fae7e72d 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_context_restore.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_context_restore.S
@@ -1,259 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a9/ac6/src/tx_thread_context_save.S b/ports/cortex_a9/ac6/src/tx_thread_context_save.S
index a04cf555..7ac48c2e 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_context_save.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_context_save.S
@@ -1,205 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_irq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a9/ac6/src/tx_thread_fiq_context_restore.S b/ports/cortex_a9/ac6/src/tx_thread_fiq_context_restore.S
index 00191310..006be973 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -45,218 +34,190 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_exit
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+                                            // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a9/ac6/src/tx_thread_fiq_context_save.S b/ports/cortex_a9/ac6/src/tx_thread_fiq_context_save.S
index dc980ac6..7db6a4c2 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_fiq_context_save.S
@@ -1,206 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+       out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_end.S
index ecf2db8e..b34d881e 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+    ORR     r0, r0, #FIQ_MODE_BITS          // Build FIQ mode CPSR
+    MSR     CPSR_c, r0                      // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_start.S
index f042af5d..c9cd5a06 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+    MSR     CPSR_c, r0                      // Enable FIQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_interrupt_control.S b/ports/cortex_a9/ac6/src/tx_thread_interrupt_control.S
index a6ac989c..63b1609a 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/ac6/src/tx_thread_interrupt_disable.S b/ports/cortex_a9/ac6/src/tx_thread_interrupt_disable.S
index 2b0f0840..13258808 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_interrupt_restore.S b/ports/cortex_a9/ac6/src/tx_thread_interrupt_restore.S
index 3793925d..2d582511 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_end.S b/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_end.S
index b66fa3ca..ec7e63c6 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_start.S b/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_start.S
index e864d867..c69976ed 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+    MSR     CPSR_c, r0                      // Enable IRQ interrupts
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_schedule.S b/ports/cortex_a9/ac6/src/tx_thread_schedule.S
index 680ff082..8330e9df 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_schedule.S
@@ -1,257 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+    CPSID   if                              // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+    CPSID   i                               // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_stack_build.S b/ports/cortex_a9/ac6/src/tx_thread_stack_build.S
index 3adf5080..f413e673 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_stack_build.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A9 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+                    pc    (r15)   Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a9/ac6/src/tx_thread_system_return.S b/ports/cortex_a9/ac6/src/tx_thread_system_return.S
index 92669799..cb7d62ce 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_system_return.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_system_return.S
@@ -1,182 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+   applications calling this function from 16-bit Thumb mode.  */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a9/ac6/src/tx_thread_vectored_context_save.S b/ports/cortex_a9/ac6/src/tx_thread_vectored_context_save.S
index a7e25a32..d846223f 100644
--- a/ports/cortex_a9/ac6/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a9/ac6/src/tx_thread_vectored_context_save.S
@@ -1,192 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global _tx_execution_isr_enter
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+   since it will never be called in 16-bit mode.  */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A9/AC6 */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a9/ac6/src/tx_timer_interrupt.S b/ports/cortex_a9/ac6/src/tx_timer_interrupt.S
index fa28e728..7337ed0c 100644
--- a/ports/cortex_a9/ac6/src/tx_timer_interrupt.S
+++ b/ports/cortex_a9/ac6/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A9/AC6 */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+                                                // the 8-byte stack alignment)
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/gnu/example_build/build_threadx_sample.bat b/ports/cortex_a9/gnu/example_build/build_threadx_sample.bat
index e6ddefad..1a558630 100644
--- a/ports/cortex_a9/gnu/example_build/build_threadx_sample.bat
+++ b/ports/cortex_a9/gnu/example_build/build_threadx_sample.bat
@@ -2,5 +2,5 @@ arm-none-eabi-gcc -c -g -mcpu=cortex-a9 reset.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a9 crt0.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a9 tx_initialize_low_level.S
arm-none-eabi-gcc -c -g -mcpu=cortex-a9 -I../../../../common/inc -I../inc sample_threadx.c
-arm-none-eabi-ld -A cortex-a9 -T sample_threadx.ld reset.o crt0.o tx_initialize_low_level.o sample_threadx.o tx.a libc.a libgcc.a -o sample_threadx.out -M > sample_threadx.map
+arm-none-eabi-gcc -g -mcpu=cortex-a9 -T sample_threadx.ld --specs=nosys.specs -o sample_threadx.out -Wl,-Map=sample_threadx.map tx_initialize_low_level.o sample_threadx.o tx.a
diff --git a/ports/cortex_a9/gnu/example_build/crt0.S b/ports/cortex_a9/gnu/example_build/crt0.S
index aa0f3239..56b6c958 100644
--- a/ports/cortex_a9/gnu/example_build/crt0.S
+++ b/ports/cortex_a9/gnu/example_build/crt0.S
@@ -26,13 +26,13 @@ _mainCRTStartup:
mov a2, #0 /* Second arg: fill value */
mov fp, a2 /* Null frame pointer */
mov r7, a2 /* Null frame pointer for Thumb */
-
- ldr a1, .LC1 /* First arg: start of memory block */
- ldr a3, .LC2
- sub a3, a3, a1 /* Third arg: length of block */
-
-
+ ldr a1, .LC1 /* First arg: start of memory block */
+ ldr a3, .LC2
+ sub a3, a3, a1 /* Third arg: length of block */
+
+
+
bl memset
mov r0, #0 /* no arguments */
mov r1, #0 /* no argv either */
@@ -48,15 +48,15 @@ _mainCRTStartup:
/* bl init */
mov r0, r4
mov r1, r5
-#endif
+#endif
bl main
bl exit /* Should not return. */
-
- /* For Thumb, constants must be after the code since only
+
+ /* For Thumb, constants must be after the code since only
positive offsets are supported for PC relative addresses. */
-
+
.align 0
.LC0:
.LC1:
diff --git a/ports/cortex_a9/gnu/example_build/reset.S b/ports/cortex_a9/gnu/example_build/reset.S
index 856e31eb..597e9d9a 100644
--- a/ports/cortex_a9/gnu/example_build/reset.S
+++ b/ports/cortex_a9/gnu/example_build/reset.S
@@ -1,35 +1,24 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
@@ -41,36 +30,35 @@
.global __tx_reserved_handler
.global __tx_irq_handler
.global __tx_fiq_handler
-@
-@
-@/* Define the vector area. This should be located or copied to 0. */
-@
+
+/* Define the vector area. This should be located or copied to 0. */
+
.text
.global __vectors
__vectors:
- LDR pc, STARTUP @ Reset goes to startup function
- LDR pc, UNDEFINED @ Undefined handler
- LDR pc, SWI @ Software interrupt handler
- LDR pc, PREFETCH @ Prefetch exception handler
- LDR pc, ABORT @ Abort exception handler
- LDR pc, RESERVED @ Reserved exception handler
- LDR pc, IRQ @ IRQ interrupt handler
- LDR pc, FIQ @ FIQ interrupt handler
+ LDR pc, STARTUP // Reset goes to startup function
+ LDR pc, UNDEFINED // Undefined handler
+ LDR pc, SWI // Software interrupt handler
+ LDR pc, PREFETCH // Prefetch exception handler
+ LDR pc, ABORT // Abort exception handler
+ LDR pc, RESERVED // Reserved exception handler
+ LDR pc, IRQ // IRQ interrupt handler
+ LDR pc, FIQ // FIQ interrupt handler
STARTUP:
- .word _start @ Reset goes to C startup function
+ .word _start // Reset goes to C startup function
UNDEFINED:
- .word __tx_undefined @ Undefined handler
+ .word __tx_undefined // Undefined handler
SWI:
- .word __tx_swi_interrupt @ Software interrupt handler
+ .word __tx_swi_interrupt // Software interrupt handler
PREFETCH:
- .word __tx_prefetch_handler @ Prefetch exception handler
-ABORT:
- .word __tx_abort_handler @ Abort exception handler
-RESERVED:
- .word __tx_reserved_handler @ Reserved exception handler
-IRQ:
- .word __tx_irq_handler @ IRQ interrupt handler
+ .word __tx_prefetch_handler // Prefetch exception handler
+ABORT:
+ .word __tx_abort_handler // Abort exception handler
+RESERVED:
+ .word __tx_reserved_handler // Reserved exception handler
+IRQ:
+ .word __tx_irq_handler // IRQ interrupt handler
FIQ:
- .word __tx_fiq_handler @ FIQ interrupt handler
+ .word __tx_fiq_handler // FIQ interrupt handler
diff --git a/ports/cortex_a9/gnu/example_build/sample_threadx.c b/ports/cortex_a9/gnu/example_build/sample_threadx.c
index 418ec634..8c61de06 100644
--- a/ports/cortex_a9/gnu/example_build/sample_threadx.c
+++ b/ports/cortex_a9/gnu/example_build/sample_threadx.c
@@ -1,5 +1,5 @@
/* This is a small demo of the high-performance ThreadX kernel. It includes examples of eight
- threads of different priorities, using a message queue, semaphore, mutex, event flags group,
+ threads of different priorities, using a message queue, semaphore, mutex, event flags group,
byte pool, and block pool. */
#include "tx_api.h"
@@ -80,42 +80,42 @@ CHAR *pointer = TX_NULL;
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create the main thread. */
- tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_0, "thread 0", thread_0_entry, 0,
+ pointer, DEMO_STACK_SIZE,
1, 1, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 1. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 1 and 2. These threads pass information through a ThreadX
+ /* Create threads 1 and 2. These threads pass information through a ThreadX
message queue. It is also interesting to note that these threads have a time
slice. */
- tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_1, "thread 1", thread_1_entry, 1,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 2. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_2, "thread 2", thread_2_entry, 2,
+ pointer, DEMO_STACK_SIZE,
16, 16, 4, TX_AUTO_START);
/* Allocate the stack for thread 3. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
+ /* Create threads 3 and 4. These threads compete for a ThreadX counting semaphore.
An interesting thing here is that both threads share the same instruction area. */
- tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_3, "thread 3", thread_3_and_4_entry, 3,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 4. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_4, "thread 4", thread_3_and_4_entry, 4,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 5. */
@@ -123,23 +123,23 @@ CHAR *pointer = TX_NULL;
/* Create thread 5. This thread simply pends on an event flag which will be set
by thread_0. */
- tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_5, "thread 5", thread_5_entry, 5,
+ pointer, DEMO_STACK_SIZE,
4, 4, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 6. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
/* Create threads 6 and 7. These threads compete for a ThreadX mutex. */
- tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_6, "thread 6", thread_6_and_7_entry, 6,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the stack for thread 7. */
tx_byte_allocate(&byte_pool_0, (VOID **) &pointer, DEMO_STACK_SIZE, TX_NO_WAIT);
- tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
- pointer, DEMO_STACK_SIZE,
+ tx_thread_create(&thread_7, "thread 7", thread_6_and_7_entry, 7,
+ pointer, DEMO_STACK_SIZE,
8, 8, TX_NO_TIME_SLICE, TX_AUTO_START);
/* Allocate the message queue. */
@@ -242,11 +242,11 @@ UINT status;
/* Retrieve a message from the queue. */
status = tx_queue_receive(&queue_0, &received_message, TX_WAIT_FOREVER);
- /* Check completion status and make sure the message is what we
+ /* Check completion status and make sure the message is what we
expected. */
if ((status != TX_SUCCESS) || (received_message != thread_2_messages_received))
break;
-
+
/* Otherwise, all is okay. Increment the received message count. */
thread_2_messages_received++;
}
@@ -305,7 +305,7 @@ ULONG actual_flags;
thread_5_counter++;
/* Wait for event flag 0. */
- status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
+ status = tx_event_flags_get(&event_flags_0, 0x1, TX_OR_CLEAR,
&actual_flags, TX_WAIT_FOREVER);
/* Check status. */
@@ -358,7 +358,7 @@ UINT status;
if (status != TX_SUCCESS)
break;
- /* Release the mutex again. This will actually
+ /* Release the mutex again. This will actually
release ownership since it was obtained twice. */
status = tx_mutex_put(&mutex_0);
diff --git a/ports/cortex_a9/gnu/example_build/tx_initialize_low_level.S b/ports/cortex_a9/gnu/example_build/tx_initialize_low_level.S
index 2e274666..7de5d3ce 100644
--- a/ports/cortex_a9/gnu/example_build/tx_initialize_low_level.S
+++ b/ports/cortex_a9/gnu/example_build/tx_initialize_low_level.S
@@ -1,47 +1,35 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Initialize */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_initialize.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Initialize */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ IRQ mode
-FIQ_MODE = 0xD1 @ Disable IRQ/FIQ FIQ mode
-SYS_MODE = 0xDF @ Disable IRQ/FIQ SYS mode
-FIQ_STACK_SIZE = 512 @ FIQ stack size
-IRQ_STACK_SIZE = 1024 @ IRQ stack size
-SYS_STACK_SIZE = 1024 @ System stack size
-@
-@
+SVC_MODE = 0xD3 // Disable IRQ/FIQ SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ IRQ mode
+FIQ_MODE = 0xD1 // Disable IRQ/FIQ FIQ mode
+SYS_MODE = 0xDF // Disable IRQ/FIQ SYS mode
+FIQ_STACK_SIZE = 512 // FIQ stack size
+IRQ_STACK_SIZE = 1024 // IRQ stack size
+SYS_STACK_SIZE = 1024 // System stack size
+
.global _tx_thread_system_stack_ptr
.global _tx_initialize_unused_memory
.global _tx_thread_context_save
@@ -51,297 +39,267 @@ SYS_STACK_SIZE = 1024 @ System stack size
.global _sp
.global _stack_bottom
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_initialize_low_level for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_initialize_low_level
.type $_tx_initialize_low_level,function
$_tx_initialize_low_level:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_initialize_low_level @ Call _tx_initialize_low_level function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_initialize_low_level // Call _tx_initialize_low_level function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_initialize_low_level Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for any low-level processor */
-@/* initialization, including setting up interrupt vectors, setting */
-@/* up a periodic timer interrupt source, saving the system stack */
-@/* pointer for use in ISR processing later, and finding the first */
-@/* available RAM memory address for tx_application_define. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_initialize_low_level(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_initialize_low_level ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for any low-level processor */
+/* initialization, including setting up interrupt vectors, setting */
+/* up a periodic timer interrupt source, saving the system stack */
+/* pointer for use in ISR processing later, and finding the first */
+/* available RAM memory address for tx_application_define. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_initialize_low_level
.type _tx_initialize_low_level,function
_tx_initialize_low_level:
-@
-@ /* We must be in SVC mode at this point! */
-@
-@ /* Setup various stack pointers. */
-@
- LDR r1, =_sp @ Get pointer to stack area
-#ifdef TX_ENABLE_IRQ_NESTING
-@
-@ /* Setup the system mode stack for nested interrupt support */
-@
- LDR r2, =SYS_STACK_SIZE @ Pickup stack size
- MOV r3, #SYS_MODE @ Build SYS mode CPSR
- MSR CPSR_c, r3 @ Enter SYS mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup SYS stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
+ /* We must be in SVC mode at this point! */
+
+ /* Setup various stack pointers. */
+
+ LDR r1, =_sp // Get pointer to stack area
+
+#ifdef TX_ENABLE_IRQ_NESTING
+
+ /* Setup the system mode stack for nested interrupt support */
+
+ LDR r2, =SYS_STACK_SIZE // Pickup stack size
+ MOV r3, #SYS_MODE // Build SYS mode CPSR
+ MSR CPSR_c, r3 // Enter SYS mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup SYS stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
#endif
- LDR r2, =FIQ_STACK_SIZE @ Pickup stack size
- MOV r0, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR, r0 @ Enter FIQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup FIQ stack pointer
- SUB r1, r1, r2 @ Calculate start of next stack
- LDR r2, =IRQ_STACK_SIZE @ Pickup IRQ stack size
- MOV r0, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR, r0 @ Enter IRQ mode
- SUB r1, r1, #1 @ Backup 1 byte
- BIC r1, r1, #7 @ Ensure 8-byte alignment
- MOV sp, r1 @ Setup IRQ stack pointer
- SUB r3, r1, r2 @ Calculate end of IRQ stack
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR, r0 @ Enter SVC mode
- LDR r2, =_stack_bottom @ Pickup stack bottom
- CMP r3, r2 @ Compare the current stack end with the bottom
-_stack_error_loop:
- BLT _stack_error_loop @ If the IRQ stack exceeds the stack bottom, just sit here!
-@
-@ /* Save the system stack pointer. */
-@ _tx_thread_system_stack_ptr = (VOID_PTR) (sp);
-@
- LDR r2, =_tx_thread_system_stack_ptr @ Pickup stack pointer
- STR r1, [r2] @ Save the system stack
-@
-@ /* Save the first available memory address. */
-@ _tx_initialize_unused_memory = (VOID_PTR) _end;
-@
- LDR r1, =_end @ Get end of non-initialized RAM area
- LDR r2, =_tx_initialize_unused_memory @ Pickup unused memory ptr address
- ADD r1, r1, #8 @ Increment to next free word
- STR r1, [r2] @ Save first free memory address
-@
-@ /* Setup Timer for periodic interrupts. */
-@
-@ /* Done, return to caller. */
-@
+ LDR r2, =FIQ_STACK_SIZE // Pickup stack size
+ MOV r0, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR, r0 // Enter FIQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup FIQ stack pointer
+ SUB r1, r1, r2 // Calculate start of next stack
+ LDR r2, =IRQ_STACK_SIZE // Pickup IRQ stack size
+ MOV r0, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR, r0 // Enter IRQ mode
+ SUB r1, r1, #1 // Backup 1 byte
+ BIC r1, r1, #7 // Ensure 8-byte alignment
+ MOV sp, r1 // Setup IRQ stack pointer
+ SUB r3, r1, r2 // Calculate end of IRQ stack
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR, r0 // Enter SVC mode
+ LDR r2, =_stack_bottom // Pickup stack bottom
+ CMP r3, r2 // Compare the current stack end with the bottom
+_stack_error_loop:
+ BLT _stack_error_loop // If the IRQ stack exceeds the stack bottom, just sit here!
+
+ LDR r2, =_tx_thread_system_stack_ptr // Pickup stack pointer
+ STR r1, [r2] // Save the system stack
+
+ LDR r1, =_end // Get end of non-initialized RAM area
+ LDR r2, =_tx_initialize_unused_memory // Pickup unused memory ptr address
+ ADD r1, r1, #8 // Increment to next free word
+ STR r1, [r2] // Save first free memory address
+
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-@
-@
-@/* Define shells for each of the interrupt vectors. */
-@
+
+/* Define shells for each of the interrupt vectors. */
+
.global __tx_undefined
__tx_undefined:
- B __tx_undefined @ Undefined handler
-@
+ B __tx_undefined // Undefined handler
+
.global __tx_swi_interrupt
__tx_swi_interrupt:
- B __tx_swi_interrupt @ Software interrupt handler
-@
+ B __tx_swi_interrupt // Software interrupt handler
+
.global __tx_prefetch_handler
__tx_prefetch_handler:
- B __tx_prefetch_handler @ Prefetch exception handler
-@
+ B __tx_prefetch_handler // Prefetch exception handler
+
.global __tx_abort_handler
__tx_abort_handler:
- B __tx_abort_handler @ Abort exception handler
-@
+ B __tx_abort_handler // Abort exception handler
+
.global __tx_reserved_handler
__tx_reserved_handler:
- B __tx_reserved_handler @ Reserved exception handler
-@
+ B __tx_reserved_handler // Reserved exception handler
+
.global __tx_irq_handler
- .global __tx_irq_processing_return
+ .global __tx_irq_processing_return
__tx_irq_handler:
-@
-@ /* Jump to context save to save system context. */
+
+ /* Jump to context save to save system context. */
B _tx_thread_context_save
__tx_irq_processing_return:
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_start
#endif
-@
-@ /* For debug purpose, execute the timer interrupt processing here. In
-@ a real system, some kind of status indication would have to be checked
-@ before the timer interrupt handler could be called. */
-@
- BL _tx_timer_interrupt @ Timer interrupt handler
-@
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
+
+ /* For debug purpose, execute the timer interrupt processing here. In
+ a real system, some kind of status indication would have to be checked
+ before the timer interrupt handler could be called. */
+
+ BL _tx_timer_interrupt // Timer interrupt handler
+
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled. */
#ifdef TX_ENABLE_IRQ_NESTING
BL _tx_thread_irq_nesting_end
#endif
-@
-@ /* Jump to context restore to restore system context. */
+
+ /* Jump to context restore to restore system context. */
B _tx_thread_context_restore
-@
-@
-@ /* This is an example of a vectored IRQ handler. */
-@
-@ .global __tx_example_vectored_irq_handler
-@__tx_example_vectored_irq_handler:
-@
-@
-@ /* Save initial context and call context save to prepare for
-@ vectored ISR execution. */
-@
-@ STMDB sp!, {r0-r3} @ Save some scratch registers
-@ MRS r0, SPSR @ Pickup saved SPSR
-@ SUB lr, lr, #4 @ Adjust point of interrupt
-@ STMDB sp!, {r0, r10, r12, lr} @ Store other scratch registers
-@ BL _tx_thread_vectored_context_save @ Vectored context save
-@
-@ /* At this point execution is still in the IRQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. In
-@ addition, IRQ interrupts may be re-enabled - with certain restrictions -
-@ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
-@ small code sequences where lr is saved before enabling interrupts and
-@ restored after interrupts are again disabled. */
-@
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
-@ from IRQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with IRQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all IRQ interrupts are cleared
-@ prior to enabling nested IRQ interrupts. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_start
-@#endif
-@
-@ /* Application IRQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_context_restore.
-@ This routine returns in processing in IRQ mode with interrupts disabled. */
-@#ifdef TX_ENABLE_IRQ_NESTING
-@ BL _tx_thread_irq_nesting_end
-@#endif
-@
-@ /* Jump to context restore to restore system context. */
-@ B _tx_thread_context_restore
-@
-@
+
+
+ /* This is an example of a vectored IRQ handler. */
+
+
+
+ /* Save initial context and call context save to prepare for
+ vectored ISR execution. */
+
+ /* At this point execution is still in the IRQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. In
+ addition, IRQ interrupts may be re-enabled - with certain restrictions -
+ if nested IRQ interrupts are desired. Interrupts may be re-enabled over
+ small code sequences where lr is saved before enabling interrupts and
+ restored after interrupts are again disabled. */
+
+
+ /* Interrupt nesting is allowed after calling _tx_thread_irq_nesting_start
+ from IRQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with IRQ interrupts enabled.
+
+ NOTE: It is very important to ensure all IRQ interrupts are cleared
+ prior to enabling nested IRQ interrupts. */
+
+ /* Application IRQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_context_restore.
+       This routine returns to processing in IRQ mode with interrupts disabled. */
+
+
+
#ifdef TX_ENABLE_FIQ_SUPPORT
.global __tx_fiq_handler
.global __tx_fiq_processing_return
__tx_fiq_handler:
-@
-@ /* Jump to fiq context save to save system context. */
+
+ /* Jump to fiq context save to save system context. */
B _tx_thread_fiq_context_save
__tx_fiq_processing_return:
-@
-@ /* At this point execution is still in the FIQ mode. The CPSR, point of
-@ interrupt, and all C scratch registers are available for use. */
-@
-@ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
-@ from FIQ mode with interrupts disabled. This routine switches to the
-@ system mode and returns with FIQ interrupts enabled.
-@
-@ NOTE: It is very important to ensure all FIQ interrupts are cleared
-@ prior to enabling nested FIQ interrupts. */
+
+ /* At this point execution is still in the FIQ mode. The CPSR, point of
+ interrupt, and all C scratch registers are available for use. */
+
+ /* Interrupt nesting is allowed after calling _tx_thread_fiq_nesting_start
+ from FIQ mode with interrupts disabled. This routine switches to the
+ system mode and returns with FIQ interrupts enabled.
+
+ NOTE: It is very important to ensure all FIQ interrupts are cleared
+ prior to enabling nested FIQ interrupts. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_start
#endif
-@
-@ /* Application FIQ handlers can be called here! */
-@
-@ /* If interrupt nesting was started earlier, the end of interrupt nesting
-@ service must be called before returning to _tx_thread_fiq_context_restore. */
+
+ /* Application FIQ handlers can be called here! */
+
+ /* If interrupt nesting was started earlier, the end of interrupt nesting
+ service must be called before returning to _tx_thread_fiq_context_restore. */
#ifdef TX_ENABLE_FIQ_NESTING
BL _tx_thread_fiq_nesting_end
#endif
-@
-@ /* Jump to fiq context restore to restore system context. */
+
+ /* Jump to fiq context restore to restore system context. */
B _tx_thread_fiq_context_restore
-@
-@
+
+
#else
.global __tx_fiq_handler
__tx_fiq_handler:
- B __tx_fiq_handler @ FIQ interrupt handler
+ B __tx_fiq_handler // FIQ interrupt handler
#endif
-@
-@
+
+
BUILD_OPTIONS:
- .word _tx_build_options @ Reference to bring in
+ .word _tx_build_options // Reference to bring in
VERSION_ID:
- .word _tx_version_id @ Reference to bring in
+ .word _tx_version_id // Reference to bring in
diff --git a/ports/cortex_a9/gnu/inc/tx_port.h b/ports/cortex_a9/gnu/inc/tx_port.h
index 51f6fd2a..19463de1 100644
--- a/ports/cortex_a9/gnu/inc/tx_port.h
+++ b/ports/cortex_a9/gnu/inc/tx_port.h
@@ -12,7 +12,7 @@
/**************************************************************************/
/**************************************************************************/
-/** */
+/** */
/** ThreadX Component */
/** */
/** Port Specific */
@@ -21,36 +21,38 @@
/**************************************************************************/
-/**************************************************************************/
-/* */
-/* PORT SPECIFIC C INFORMATION RELEASE */
-/* */
-/* tx_port.h Cortex-A9/GNU */
-/* 6.1.6 */
+/**************************************************************************/
+/* */
+/* PORT SPECIFIC C INFORMATION RELEASE */
+/* */
+/* tx_port.h ARMv7-A */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
/* */
/* DESCRIPTION */
-/* */
-/* This file contains data type definitions that make the ThreadX */
-/* real-time kernel function identically on a variety of different */
-/* processor architectures. For example, the size or number of bits */
-/* in an "int" data type vary between microprocessor architectures and */
-/* even C compilers for the same microprocessor. ThreadX does not */
-/* directly use native C data types. Instead, ThreadX creates its */
-/* own special types that can be mapped to actual data types by this */
-/* file to guarantee consistency in the interface and functionality. */
-/* */
-/* RELEASE HISTORY */
-/* */
+/* */
+/* This file contains data type definitions that make the ThreadX */
+/* real-time kernel function identically on a variety of different */
+/* processor architectures. For example, the size or number of bits */
+/* in an "int" data type vary between microprocessor architectures and */
+/* even C compilers for the same microprocessor. ThreadX does not */
+/* directly use native C data types. Instead, ThreadX creates its */
+/* own special types that can be mapped to actual data types by this */
+/* file to guarantee consistency in the interface and functionality. */
+/* */
+/* RELEASE HISTORY */
+/* */
/* DATE NAME DESCRIPTION */
/* */
/* 09-30-2020 William E. Lamie Initial Version 6.1 */
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -63,7 +65,7 @@
#ifdef TX_INCLUDE_USER_DEFINE_FILE
-/* Yes, include the user defines in tx_user.h. The defines in this file may
+/* Yes, include the user defines in tx_user.h. The defines in this file may
alternately be defined on the command line. */
#include "tx_user.h"
@@ -76,7 +78,7 @@
#include
-/* Define ThreadX basic types for this port. */
+/* Define ThreadX basic types for this port. */
#define VOID void
typedef char CHAR;
@@ -112,12 +114,12 @@ typedef unsigned short USHORT;
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
-#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
+#ifndef TX_TIMER_THREAD_PRIORITY
+#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
-/* Define various constants for the ThreadX ARM port. */
+/* Define various constants for the ThreadX ARM port. */
#ifdef TX_ENABLE_FIQ_SUPPORT
#define TX_INT_DISABLE 0xC0 /* Disable IRQ & FIQ interrupts */
@@ -127,8 +129,8 @@ typedef unsigned short USHORT;
#define TX_INT_ENABLE 0x00 /* Enable IRQ interrupts */
-/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+/* Define the clock source for trace event entry time stamp. The following two items are port specific.
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
@@ -175,7 +177,7 @@ typedef unsigned short USHORT;
#define TX_INLINE_INITIALIZATION
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -187,13 +189,13 @@ typedef unsigned short USHORT;
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
-#define TX_THREAD_EXTENSION_0
-#define TX_THREAD_EXTENSION_1
+#define TX_THREAD_EXTENSION_0
+#define TX_THREAD_EXTENSION_1
#define TX_THREAD_EXTENSION_2 ULONG tx_thread_vfp_enable;
-#define TX_THREAD_EXTENSION_3
+#define TX_THREAD_EXTENSION_3
/* Define the port extensions of the remaining ThreadX objects. */
@@ -207,11 +209,11 @@ typedef unsigned short USHORT;
#define TX_TIMER_EXTENSION
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
-#define TX_THREAD_USER_EXTENSION
+#define TX_THREAD_USER_EXTENSION
#endif
@@ -219,8 +221,8 @@ typedef unsigned short USHORT;
tx_thread_shell_entry, and tx_thread_terminate. */
-#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
-#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
+#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
+#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
@@ -247,24 +249,24 @@ typedef unsigned short USHORT;
#define TX_TIMER_DELETE_EXTENSION(timer_ptr)
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
+/* Determine if the ARM architecture has the CLZ instruction. This is available on
+ architectures v5 and above. If available, redefine the macro for calculating the
lowest bit set. */
-
+
#if __TARGET_ARCH_ARM > 4
#ifndef __thumb__
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) m = m & ((ULONG) (-((LONG) m))); \
asm volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) ); \
- b = 31 - b;
+ b = 31 - b;
#endif
#endif
-/* Define ThreadX interrupt lockout and restore macros for protection on
- access of critical kernel information. The restore interrupt macro must
- restore the interrupt posture of the running thread prior to the value
+/* Define ThreadX interrupt lockout and restore macros for protection on
+ access of critical kernel information. The restore interrupt macro must
+ restore the interrupt posture of the running thread prior to the value
present prior to the disable macro. In most cases, the save area macro
is used to define a local function save area for the disable and restore
macros. */
@@ -295,7 +297,7 @@ unsigned int _tx_thread_interrupt_restore(UINT old_posture);
#endif
-/* Define VFP extension for the Cortex-A9. Each is assumed to be called in the context of the executing
+/* Define VFP extension for the ARMv7-A. Each is assumed to be called in the context of the executing
thread. */
void tx_thread_vfp_enable(void);
@@ -315,8 +317,8 @@ void tx_thread_vfp_disable(void);
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-A9/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX ARMv7-A Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_a9/gnu/src/tx_thread_context_restore.S b/ports/cortex_a9/gnu/src/tx_thread_context_restore.S
index c3ef49a4..fae7e72d 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_context_restore.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_context_restore.S
@@ -1,260 +1,222 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
#ifdef TX_ENABLE_FIQ_SUPPORT
-SVC_MODE = 0xD3 @ Disable IRQ/FIQ, SVC mode
-IRQ_MODE = 0xD2 @ Disable IRQ/FIQ, IRQ mode
+SVC_MODE = 0xD3 // Disable IRQ/FIQ, SVC mode
+IRQ_MODE = 0xD2 // Disable IRQ/FIQ, IRQ mode
#else
-SVC_MODE = 0x93 @ Disable IRQ, SVC mode
-IRQ_MODE = 0x92 @ Disable IRQ, IRQ mode
+SVC_MODE = 0x93 // Disable IRQ, SVC mode
+IRQ_MODE = 0x92 // Disable IRQ, IRQ mode
#endif
-@
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_execute_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
- .global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_restore
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_restore Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the interrupt context if it is processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the interrupt context if it is processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_restore
.type _tx_thread_context_restore,function
_tx_thread_context_restore:
-@
-@ /* Lockout interrupts. */
-@
+
+ /* Lockout interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
-#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-__tx_thread_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_restore @ Yes, idle system was interrupted
-@
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_preempt_restore @ No, preemption needs to happen
-@
-@
-__tx_thread_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_preempt_restore:
-@
- LDMIA sp!, {r3, r10, r12, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #IRQ_MODE @ Build IRQ mode CPSR
- MSR CPSR_c, r2 @ Enter IRQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
+#endif
+
+ /* Determine if interrupts are nested. */
+
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_not_nested_restore:
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_restore // Yes, idle system was interrupted
+
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_preempt_restore // No, preemption needs to happen
+
+
+__tx_thread_no_preempt_restore:
+
+ /* Restore interrupted thread or ISR. */
+
+ /* Pickup the saved stack pointer. */
+
+ /* Recover the saved context and return to the point of interrupt. */
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_preempt_restore:
+
+ LDMIA sp!, {r3, r10, r12, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #IRQ_MODE // Build IRQ mode CPSR
+ MSR CPSR_c, r2 // Enter IRQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_irq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_irq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
+
_tx_skip_irq_vfp_save:
+
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+ // block
+
+ /* Save the remaining time-slice and disable it. */
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_dont_save_ts // No, don't save it
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
__tx_thread_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
+
+ /* Clear the current task pointer. */
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+ B _tx_thread_schedule // Return to scheduler
+
__tx_thread_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- MOV r0, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r0 @ Enter SVC mode
- B _tx_thread_schedule @ Return to scheduler
-@}
-
-
+ /* Just return back to the scheduler! */
+ MOV r0, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r0 // Enter SVC mode
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a9/gnu/src/tx_thread_context_save.S b/ports/cortex_a9/gnu/src/tx_thread_context_save.S
index 9dfdd92f..7ac48c2e 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_context_save.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_context_save.S
@@ -1,206 +1,172 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
- .global _tx_irq_processing_return
- .global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
-@ since it will never be called 16-bit mode. */
-@
+ .global __tx_irq_processing_return
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_context_save Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_context_save
.type _tx_thread_context_save,function
_tx_thread_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable FIQ interrupts
+ CPSID if // Disable FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, r10, r12, lr} @ Store other registers
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr@
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, r10, r12, lr} // Store other registers
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_irq_processing_return // Continue IRQ processing
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #16 @ Recover saved registers
- B __tx_irq_processing_return @ Continue IRQ processing
-@
-@ }
-@}
-
-
-
+ ADD sp, sp, #16 // Recover saved registers
+ B __tx_irq_processing_return // Continue IRQ processing
diff --git a/ports/cortex_a9/gnu/src/tx_thread_fiq_context_restore.S b/ports/cortex_a9/gnu/src/tx_thread_fiq_context_restore.S
index fd32c934..006be973 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_fiq_context_restore.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_fiq_context_restore.S
@@ -1,43 +1,32 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
-SVC_MODE = 0xD3 @ SVC mode
-FIQ_MODE = 0xD1 @ FIQ mode
-MODE_MASK = 0x1F @ Mode mask
-THUMB_MASK = 0x20 @ Thumb bit mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+SVC_MODE = 0xD3 // SVC mode
+FIQ_MODE = 0xD1 // FIQ mode
+MODE_MASK = 0x1F // Mode mask
+THUMB_MASK = 0x20 // Thumb bit mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_thread_system_stack_ptr
@@ -46,218 +35,189 @@ IRQ_MODE_BITS = 0x12 @ IRQ mode bits
.global _tx_thread_schedule
.global _tx_thread_preempt_disable
.global _tx_execution_isr_exit
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_restore
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_restore Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function restores the fiq interrupt context when processing a */
-@/* nested interrupt. If not, it returns to the interrupt thread if no */
-@/* preemption is necessary. Otherwise, if preemption is necessary or */
-@/* if no thread was running, the function returns to the scheduler. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling routine */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* FIQ ISR Interrupt Service Routines */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_context_restore(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function restores the fiq interrupt context when processing a */
+/* nested interrupt. If not, it returns to the interrupt thread if no */
+/* preemption is necessary. Otherwise, if preemption is necessary or */
+/* if no thread was running, the function returns to the scheduler. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling routine */
+/* */
+/* CALLED BY */
+/* */
+/* FIQ ISR Interrupt Service Routines */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_restore
.type _tx_thread_fiq_context_restore,function
_tx_thread_fiq_context_restore:
-@
-@ /* Lockout interrupts. */
-@
- CPSID if @ Disable IRQ and FIQ interrupts
+
+ /* Lockout interrupts. */
+
+ CPSID if // Disable IRQ and FIQ interrupts
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR exit function to indicate an ISR is complete. */
-@
- BL _tx_execution_isr_exit @ Call the ISR exit function
+
+ /* Call the ISR exit function to indicate an ISR is complete. */
+
+ BL _tx_execution_isr_exit // Call the ISR exit function
#endif
-@
-@ /* Determine if interrupts are nested. */
-@ if (--_tx_thread_system_state)
-@ {
-@
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- SUB r2, r2, #1 @ Decrement the counter
- STR r2, [r3] @ Store the counter
- CMP r2, #0 @ Was this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_restore @ If so, not a nested restore
-@
-@ /* Interrupts are nested. */
-@
-@ /* Just recover the saved registers and return to the point of
-@ interrupt. */
-@
- LDMIA sp!, {r0, r10, r12, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
+
+ /* Determine if interrupts are nested. */
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ SUB r2, r2, #1 // Decrement the counter
+ STR r2, [r3] // Store the counter
+ CMP r2, #0 // Was this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_restore // If so, not a nested restore
+
+ /* Interrupts are nested. */
+
+ /* Just recover the saved registers and return to the point of
+ interrupt. */
+
+ LDMIA sp!, {r0, r10, r12, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
__tx_thread_fiq_not_nested_restore:
-@
-@ /* Determine if a thread was interrupted and no preemption is required. */
-@ else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
-@ || (_tx_thread_preempt_disable))
-@ {
-@
- LDR r1, [sp] @ Pickup the saved SPSR
- MOV r2, #MODE_MASK @ Build mask to isolate the interrupted mode
- AND r1, r1, r2 @ Isolate mode bits
- CMP r1, #IRQ_MODE_BITS @ Was an interrupt taken in IRQ mode before we
- @ got to context save? */
- BEQ __tx_thread_fiq_no_preempt_restore @ Yes, just go back to point of interrupt
+
+ /* Determine if a thread was interrupted and no preemption is required. */
+
+ LDR r1, [sp] // Pickup the saved SPSR
+ MOV r2, #MODE_MASK // Build mask to isolate the interrupted mode
+ AND r1, r1, r2 // Isolate mode bits
+ CMP r1, #IRQ_MODE_BITS // Was an interrupt taken in IRQ mode before we
+ // got to context save?
+ BEQ __tx_thread_fiq_no_preempt_restore // Yes, just go back to point of interrupt
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup actual current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_restore @ Yes, idle system was interrupted
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup actual current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_restore // Yes, idle system was interrupted
- LDR r3, =_tx_thread_preempt_disable @ Pickup preempt disable address
- LDR r2, [r3] @ Pickup actual preempt disable flag
- CMP r2, #0 @ Is it set?
- BNE __tx_thread_fiq_no_preempt_restore @ Yes, don't preempt this thread
- LDR r3, =_tx_thread_execute_ptr @ Pickup address of execute thread ptr
- LDR r2, [r3] @ Pickup actual execute thread pointer
- CMP r0, r2 @ Is the same thread highest priority?
- BNE __tx_thread_fiq_preempt_restore @ No, preemption needs to happen
+ LDR r3, =_tx_thread_preempt_disable // Pickup preempt disable address
+ LDR r2, [r3] // Pickup actual preempt disable flag
+ CMP r2, #0 // Is it set?
+ BNE __tx_thread_fiq_no_preempt_restore // Yes, don't preempt this thread
+ LDR r3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
+ LDR r2, [r3] // Pickup actual execute thread pointer
+ CMP r0, r2 // Is the same thread highest priority?
+ BNE __tx_thread_fiq_preempt_restore // No, preemption needs to happen
__tx_thread_fiq_no_preempt_restore:
-@
-@ /* Restore interrupted thread or ISR. */
-@
-@ /* Pickup the saved stack pointer. */
-@ tmp_ptr = _tx_thread_current_ptr -> tx_thread_stack_ptr;
-@
-@ /* Recover the saved context and return to the point of interrupt. */
-@
- LDMIA sp!, {r0, lr} @ Recover SPSR, POI, and scratch regs
- MSR SPSR_cxsf, r0 @ Put SPSR back
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOVS pc, lr @ Return to point of interrupt
-@
-@ }
-@ else
-@ {
-__tx_thread_fiq_preempt_restore:
-@
- LDMIA sp!, {r3, lr} @ Recover temporarily saved registers
- MOV r1, lr @ Save lr (point of interrupt)
- MOV r2, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r2 @ Enter SVC mode
- STR r1, [sp, #-4]! @ Save point of interrupt
- STMDB sp!, {r4-r12, lr} @ Save upper half of registers
- MOV r4, r3 @ Save SPSR in r4
- MOV r2, #FIQ_MODE @ Build FIQ mode CPSR
- MSR CPSR_c, r2 @ Reenter FIQ mode
- LDMIA sp!, {r0-r3} @ Recover r0-r3
- MOV r5, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r5 @ Enter SVC mode
- STMDB sp!, {r0-r3} @ Save r0-r3 on thread's stack
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
+ /* Restore interrupted thread or ISR. */
+ /* Recover the saved context and return to the point of interrupt. */
+
+ LDMIA sp!, {r0, lr} // Recover SPSR, POI, and scratch regs
+ MSR SPSR_cxsf, r0 // Put SPSR back
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOVS pc, lr // Return to point of interrupt
+
+__tx_thread_fiq_preempt_restore:
+
+ LDMIA sp!, {r3, lr} // Recover temporarily saved registers
+ MOV r1, lr // Save lr (point of interrupt)
+ MOV r2, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r2 // Enter SVC mode
+ STR r1, [sp, #-4]! // Save point of interrupt
+ STMDB sp!, {r4-r12, lr} // Save upper half of registers
+ MOV r4, r3 // Save SPSR in r4
+ MOV r2, #FIQ_MODE // Build FIQ mode CPSR
+ MSR CPSR_c, r2 // Reenter FIQ mode
+ LDMIA sp!, {r0-r3} // Recover r0-r3
+ MOV r5, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r5 // Enter SVC mode
+ STMDB sp!, {r0-r3} // Save r0-r3 on thread's stack
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r2, [r0, #144] @ Pickup the VFP enabled flag
- CMP r2, #0 @ Is the VFP enabled?
- BEQ _tx_skip_fiq_vfp_save @ No, skip VFP IRQ save
- VMRS r2, FPSCR @ Pickup the FPSCR
- STR r2, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D0-D15} @ Save D0-D15
+ LDR r2, [r0, #144] // Pickup the VFP enabled flag
+ CMP r2, #0 // Is the VFP enabled?
+ BEQ _tx_skip_fiq_vfp_save // No, skip VFP IRQ save
+ VMRS r2, FPSCR // Pickup the FPSCR
+ STR r2, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D0-D15} // Save D0-D15
_tx_skip_fiq_vfp_save:
#endif
- MOV r3, #1 @ Build interrupt stack type
- STMDB sp!, {r3, r4} @ Save interrupt stack type and SPSR
- STR sp, [r0, #8] @ Save stack pointer in thread control
- @ block */
-@
-@ /* Save the remaining time-slice and disable it. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup time-slice variable address
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it active?
- BEQ __tx_thread_fiq_dont_save_ts @ No, don't save it
-@
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r2, [r0, #24] @ Save thread's time-slice
- MOV r2, #0 @ Clear value
- STR r2, [r3] @ Disable global time-slice flag
-@
-@ }
-__tx_thread_fiq_dont_save_ts:
-@
-@
-@ /* Clear the current task pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- MOV r0, #0 @ NULL value
- STR r0, [r1] @ Clear current thread pointer
-@
-@ /* Return to the scheduler. */
-@ _tx_thread_schedule();
-@
- B _tx_thread_schedule @ Return to scheduler
-@ }
-@
-__tx_thread_fiq_idle_system_restore:
-@
-@ /* Just return back to the scheduler! */
-@
- ADD sp, sp, #24 @ Recover FIQ stack space
- MOV r3, #SVC_MODE @ Build SVC mode CPSR
- MSR CPSR_c, r3 @ Lockout interrupts
- B _tx_thread_schedule @ Return to scheduler
-@
-@}
+ MOV r3, #1 // Build interrupt stack type
+ STMDB sp!, {r3, r4} // Save interrupt stack type and SPSR
+ STR sp, [r0, #8] // Save stack pointer in thread control
+                                            // block
+ LDR r3, =_tx_timer_time_slice // Pickup time-slice variable address
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it active?
+ BEQ __tx_thread_fiq_dont_save_ts // No, don't save it
+
+ STR r2, [r0, #24] // Save thread's time-slice
+ MOV r2, #0 // Clear value
+ STR r2, [r3] // Disable global time-slice flag
+
+__tx_thread_fiq_dont_save_ts:
+
+ /* Clear the current task pointer. */
+
+ MOV r0, #0 // NULL value
+ STR r0, [r1] // Clear current thread pointer
+
+ /* Return to the scheduler. */
+
+ B _tx_thread_schedule // Return to scheduler
+
+__tx_thread_fiq_idle_system_restore:
+
+ /* Just return back to the scheduler! */
+
+ ADD sp, sp, #24 // Recover FIQ stack space
+ MOV r3, #SVC_MODE // Build SVC mode CPSR
+ MSR CPSR_c, r3 // Lockout interrupts
+ B _tx_thread_schedule // Return to scheduler
diff --git a/ports/cortex_a9/gnu/src/tx_thread_fiq_context_save.S b/ports/cortex_a9/gnu/src/tx_thread_fiq_context_save.S
index e863339d..7db6a4c2 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_fiq_context_save.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_fiq_context_save.S
@@ -1,207 +1,178 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global __tx_fiq_processing_return
.global _tx_execution_isr_enter
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_context_save
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_context_save Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@ VOID _tx_thread_fiq_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_context_save
.type _tx_thread_fiq_context_save,function
_tx_thread_fiq_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
- STMDB sp!, {r0-r3} @ Save some working registers
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_fiq_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
-@
-@ /* Save the rest of the scratch registers on the stack and return to the
-@ calling ISR. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, r10, r12, lr} @ Store other registers
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+
+    /* Upon entry to this routine, it is assumed that FIQ interrupts are locked
+       out, we are in FIQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
+ STMDB sp!, {r0-r3} // Save some working registers
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_fiq_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+
+ /* Save the rest of the scratch registers on the stack and return to the
+ calling ISR. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, r10, r12, lr} // Store other registers
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+//
__tx_thread_fiq_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_fiq_idle_system_save @ If so, interrupt occurred in
-@ @ scheduling loop - nothing needs saving!
-@
-@ /* Save minimal context of interrupted thread. */
-@
- MRS r2, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r2, lr} @ Store other registers, Note that we don't
-@ @ need to save sl and ip since FIQ has
-@ @ copies of these registers. Nested
-@ @ interrupt processing does need to save
-@ @ these registers.
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_fiq_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+
+ /* Save minimal context of interrupted thread. */
+
+ MRS r2, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r2, lr} // Store other registers, Note that we don't
+ // need to save sl and ip since FIQ has
+ // copies of these registers. Nested
+ // interrupt processing does need to save
+ // these registers.
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@ else
-@ {
-@
+ B __tx_fiq_processing_return // Continue FIQ processing
+
__tx_thread_fiq_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
-#endif
-@
-@ /* Not much to do here, save the current SPSR and LR for possible
-@ use in IRQ interrupted in idle system conditions, and return to
-@ FIQ interrupt processing. */
-@
- MRS r0, SPSR @ Pickup saved SPSR
- SUB lr, lr, #4 @ Adjust point of interrupt
- STMDB sp!, {r0, lr} @ Store other registers that will get used
-@ @ or stripped off the stack in context
-@ @ restore
- B __tx_fiq_processing_return @ Continue FIQ processing
-@
-@ }
-@}
+ /* Interrupt occurred in the scheduling loop. */
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
+#endif
+
+ /* Not much to do here, save the current SPSR and LR for possible
+ use in IRQ interrupted in idle system conditions, and return to
+ FIQ interrupt processing. */
+
+ MRS r0, SPSR // Pickup saved SPSR
+ SUB lr, lr, #4 // Adjust point of interrupt
+ STMDB sp!, {r0, lr} // Store other registers that will get used
+ // or stripped off the stack in context
+ // restore
+ B __tx_fiq_processing_return // Continue FIQ processing
diff --git a/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_end.S b/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_end.S
index db32cf5b..b34d881e 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_end.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_end.S
@@ -1,116 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-FIQ_MODE_BITS = 0x11 @ FIQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+FIQ_MODE_BITS = 0x11 // FIQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_end Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
-@/* processing from system mode back to FIQ mode prior to the ISR */
-@/* calling _tx_thread_fiq_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_nesting_start has been called and switches the FIQ */
+/* processing from system mode back to FIQ mode prior to the ISR */
+/* calling _tx_thread_fiq_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_end
.type _tx_thread_fiq_nesting_end,function
_tx_thread_fiq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #FIQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+    ORR     r0, r0, #FIQ_MODE_BITS          // Build FIQ mode CPSR
+    MSR     CPSR_c, r0                      // Reenter FIQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_start.S b/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_start.S
index 6cb88686..c9cd5a06 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_start.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_fiq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-FIQ_DISABLE = 0x40 @ FIQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+FIQ_DISABLE = 0x40 // FIQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_fiq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_fiq_nesting_start Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from FIQ mode after */
-@/* _tx_thread_fiq_context_save has been called and switches the FIQ */
-@/* processing to the system mode so nested FIQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with FIQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_fiq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_fiq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from FIQ mode after */
+/* _tx_thread_fiq_context_save has been called and switches the FIQ */
+/* processing to the system mode so nested FIQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with FIQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_fiq_nesting_start
.type _tx_thread_fiq_nesting_start,function
_tx_thread_fiq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #FIQ_DISABLE @ Build enable FIQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #FIQ_DISABLE // Build enable FIQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_interrupt_control.S b/ports/cortex_a9/gnu/src/tx_thread_interrupt_control.S
index ac645d47..63b1609a 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_interrupt_control.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_interrupt_control.S
@@ -1,115 +1,104 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h" */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
INT_MASK = 0x03F
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_control for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_control
$_tx_thread_interrupt_control:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_control @ Call _tx_thread_interrupt_control function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_control // Call _tx_thread_interrupt_control function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_control Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for changing the interrupt lockout */
-@/* posture of the system. */
-@/* */
-@/* INPUT */
-@/* */
-@/* new_posture New interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_control(UINT new_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_control ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for changing the interrupt lockout */
+/* posture of the system. */
+/* */
+/* INPUT */
+/* */
+/* new_posture New interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_control
.type _tx_thread_interrupt_control,function
_tx_thread_interrupt_control:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r3, CPSR @ Pickup current CPSR
- MOV r2, #INT_MASK @ Build interrupt mask
- AND r1, r3, r2 @ Clear interrupt lockout bits
- ORR r1, r1, r0 @ Or-in new interrupt lockout bits
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r1 @ Setup new CPSR
- BIC r0, r3, r2 @ Return previous interrupt mask
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r3, CPSR // Pickup current CPSR
+ MOV r2, #INT_MASK // Build interrupt mask
+ AND r1, r3, r2 // Clear interrupt lockout bits
+ ORR r1, r1, r0 // Or-in new interrupt lockout bits
+
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r1 // Setup new CPSR
+ BIC r0, r3, r2 // Return previous interrupt mask
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/gnu/src/tx_thread_interrupt_disable.S b/ports/cortex_a9/gnu/src/tx_thread_interrupt_disable.S
index b7fa0185..13258808 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_interrupt_disable.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_interrupt_disable.S
@@ -1,113 +1,101 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_disable for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_disable
$_tx_thread_interrupt_disable:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_disable @ Call _tx_thread_interrupt_disable function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_disable // Call _tx_thread_interrupt_disable function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_disable Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for disabling interrupts */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_disable(void)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_disable ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for disabling interrupts */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_disable
.type _tx_thread_interrupt_disable,function
_tx_thread_interrupt_disable:
-@
-@ /* Pickup current interrupt lockout posture. */
-@
- MRS r0, CPSR @ Pickup current CPSR
-@
-@ /* Mask interrupts. */
-@
+
+ /* Pickup current interrupt lockout posture. */
+
+ MRS r0, CPSR // Pickup current CPSR
+
+ /* Mask interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ
+ CPSID if // Disable IRQ and FIQ
#else
- CPSID i @ Disable IRQ
+ CPSID i // Disable IRQ
#endif
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_interrupt_restore.S b/ports/cortex_a9/gnu/src/tx_thread_interrupt_restore.S
index e88e6090..2d582511 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_interrupt_restore.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_interrupt_restore.S
@@ -1,104 +1,93 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_interrupt_restore for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_interrupt_restore
$_tx_thread_interrupt_restore:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_interrupt_restore @ Call _tx_thread_interrupt_restore function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_interrupt_restore // Call _tx_thread_interrupt_restore function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_interrupt_restore Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is responsible for restoring interrupts to the state */
-@/* returned by a previous _tx_thread_interrupt_disable call. */
-@/* */
-@/* INPUT */
-@/* */
-@/* old_posture Old interrupt lockout posture */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* Application Code */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@UINT _tx_thread_interrupt_restore(UINT old_posture)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_interrupt_restore ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is responsible for restoring interrupts to the state */
+/* returned by a previous _tx_thread_interrupt_disable call. */
+/* */
+/* INPUT */
+/* */
+/* old_posture Old interrupt lockout posture */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* Application Code */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_interrupt_restore
.type _tx_thread_interrupt_restore,function
_tx_thread_interrupt_restore:
-@
-@ /* Apply the new interrupt posture. */
-@
- MSR CPSR_c, r0 @ Setup new CPSR
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@}
+ /* Apply the new interrupt posture. */
+
+ MSR CPSR_c, r0 // Setup new CPSR
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_end.S b/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_end.S
index 30e601cb..ec7e63c6 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_end.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_end.S
@@ -1,115 +1,103 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
#ifdef TX_ENABLE_FIQ_SUPPORT
-DISABLE_INTS = 0xC0 @ Disable IRQ/FIQ interrupts
+DISABLE_INTS = 0xC0 // Disable IRQ/FIQ interrupts
#else
-DISABLE_INTS = 0x80 @ Disable IRQ interrupts
+DISABLE_INTS = 0x80 // Disable IRQ interrupts
#endif
-MODE_MASK = 0x1F @ Mode mask
-IRQ_MODE_BITS = 0x12 @ IRQ mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
-@ since it will never be called 16-bit mode. */
-@
+MODE_MASK = 0x1F // Mode mask
+IRQ_MODE_BITS = 0x12 // IRQ mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_end
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_end Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
-@/* processing from system mode back to IRQ mode prior to the ISR */
-@/* calling _tx_thread_context_restore. Note that this function */
-@/* assumes the system stack pointer is in the same position after */
-@/* nesting start function was called. */
-@/* */
-@/* This function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts disabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_end(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_end ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_irq_nesting_start has been called and switches the IRQ */
+/* processing from system mode back to IRQ mode prior to the ISR */
+/* calling _tx_thread_context_restore. Note that this function */
+/* assumes the system stack pointer is in the same position after */
+/* nesting start function was called. */
+/* */
+/* This function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts disabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_end
.type _tx_thread_irq_nesting_end,function
_tx_thread_irq_nesting_end:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- ORR r0, r0, #DISABLE_INTS @ Build disable interrupt value
- MSR CPSR_c, r0 @ Disable interrupts
- LDMIA sp!, {r1, lr} @ Pickup saved lr (and r1 throw-away for
- @ 8-byte alignment logic)
- BIC r0, r0, #MODE_MASK @ Clear mode bits
- ORR r0, r0, #IRQ_MODE_BITS @ Build IRQ mode CPSR
- MSR CPSR_c, r0 @ Reenter IRQ mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ ORR r0, r0, #DISABLE_INTS // Build disable interrupt value
+ MSR CPSR_c, r0 // Disable interrupts
+ LDMIA sp!, {r1, lr} // Pickup saved lr (and r1 throw-away for
+ // 8-byte alignment logic)
+ BIC r0, r0, #MODE_MASK // Clear mode bits
+ ORR r0, r0, #IRQ_MODE_BITS // Build IRQ mode CPSR
+ MSR CPSR_c, r0 // Reenter IRQ mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_start.S b/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_start.S
index a13f73cb..c69976ed 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_start.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_irq_nesting_start.S
@@ -1,108 +1,96 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
-IRQ_DISABLE = 0x80 @ IRQ disable bit
-MODE_MASK = 0x1F @ Mode mask
-SYS_MODE_BITS = 0x1F @ System mode bits
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
-@ since it will never be called 16-bit mode. */
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+IRQ_DISABLE = 0x80 // IRQ disable bit
+MODE_MASK = 0x1F // Mode mask
+SYS_MODE_BITS = 0x1F // System mode bits
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_irq_nesting_start
+   since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_irq_nesting_start Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is called by the application from IRQ mode after */
-@/* _tx_thread_context_save has been called and switches the IRQ */
-@/* processing to the system mode so nested IRQ interrupt processing */
-@/* is possible (system mode has its own "lr" register). Note that */
-@/* this function assumes that the system mode stack pointer was setup */
-@/* during low-level initialization (tx_initialize_low_level.s). */
-@/* */
-@/* This function returns with IRQ interrupts enabled. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_irq_nesting_start(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_irq_nesting_start ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called by the application from IRQ mode after */
+/* _tx_thread_context_save has been called and switches the IRQ */
+/* processing to the system mode so nested IRQ interrupt processing */
+/* is possible (system mode has its own "lr" register). Note that */
+/* this function assumes that the system mode stack pointer was setup */
+/* during low-level initialization (tx_initialize_low_level.s). */
+/* */
+/* This function returns with IRQ interrupts enabled. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_irq_nesting_start
.type _tx_thread_irq_nesting_start,function
_tx_thread_irq_nesting_start:
- MOV r3,lr @ Save ISR return address
- MRS r0, CPSR @ Pickup the CPSR
- BIC r0, r0, #MODE_MASK @ Clear the mode bits
- ORR r0, r0, #SYS_MODE_BITS @ Build system mode CPSR
- MSR CPSR_c, r0 @ Enter system mode
- STMDB sp!, {r1, lr} @ Push the system mode lr on the system mode stack
- @ and push r1 just to keep 8-byte alignment
- BIC r0, r0, #IRQ_DISABLE @ Build enable IRQ CPSR
- MSR CPSR_c, r0 @ Enter system mode
+ MOV r3,lr // Save ISR return address
+ MRS r0, CPSR // Pickup the CPSR
+ BIC r0, r0, #MODE_MASK // Clear the mode bits
+ ORR r0, r0, #SYS_MODE_BITS // Build system mode CPSR
+ MSR CPSR_c, r0 // Enter system mode
+ STMDB sp!, {r1, lr} // Push the system mode lr on the system mode stack
+ // and push r1 just to keep 8-byte alignment
+ BIC r0, r0, #IRQ_DISABLE // Build enable IRQ CPSR
+ MSR CPSR_c, r0 // Enter system mode
#ifdef __THUMB_INTERWORK
- BX r3 @ Return to caller
+ BX r3 // Return to caller
#else
- MOV pc, r3 @ Return to caller
+ MOV pc, r3 // Return to caller
#endif
-@}
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_schedule.S b/ports/cortex_a9/gnu/src/tx_thread_schedule.S
index 2d3db2b5..8330e9df 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_schedule.S
@@ -1,258 +1,230 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_execute_ptr
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
- .global _tx_execution_thread_enter
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_schedule for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_schedule
.type $_tx_thread_schedule,function
$_tx_thread_schedule:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_schedule @ Call _tx_thread_schedule function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_schedule // Call _tx_thread_schedule function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_schedule Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function waits for a thread control block pointer to appear in */
-@/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
-@/* in the variable, the corresponding thread is resumed. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_initialize_kernel_enter ThreadX entry function */
-@/* _tx_thread_system_return Return to system from thread */
-@/* _tx_thread_context_restore Restore thread's context */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_schedule(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_schedule ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function waits for a thread control block pointer to appear in */
+/* the _tx_thread_execute_ptr variable. Once a thread pointer appears */
+/* in the variable, the corresponding thread is resumed. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_initialize_kernel_enter ThreadX entry function */
+/* _tx_thread_system_return Return to system from thread */
+/* _tx_thread_context_restore Restore thread's context */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_schedule
.type _tx_thread_schedule,function
_tx_thread_schedule:
-@
-@ /* Enable interrupts. */
-@
+
+ /* Enable interrupts. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSIE if @ Enable IRQ and FIQ interrupts
+ CPSIE if // Enable IRQ and FIQ interrupts
#else
- CPSIE i @ Enable IRQ interrupts
+ CPSIE i // Enable IRQ interrupts
#endif
-@
-@ /* Wait for a thread to execute. */
-@ do
-@ {
- LDR r1, =_tx_thread_execute_ptr @ Address of thread execute ptr
-@
+
+ /* Wait for a thread to execute. */
+ LDR r1, =_tx_thread_execute_ptr // Address of thread execute ptr
+
__tx_thread_schedule_loop:
-@
- LDR r0, [r1] @ Pickup next thread to execute
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_schedule_loop @ If so, keep looking for a thread
-@
-@ }
-@ while(_tx_thread_execute_ptr == TX_NULL);
-@
-@ /* Yes! We have a thread to execute. Lockout interrupts and
-@ transfer control to it. */
-@
+
+ LDR r0, [r1] // Pickup next thread to execute
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_schedule_loop // If so, keep looking for a thread
+ /* Yes! We have a thread to execute. Lockout interrupts and
+ transfer control to it. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Disable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
-@
-@ /* Setup the current thread pointer. */
-@ _tx_thread_current_ptr = _tx_thread_execute_ptr;
-@
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread
- STR r0, [r1] @ Setup current thread pointer
-@
-@ /* Increment the run count for this thread. */
-@ _tx_thread_current_ptr -> tx_thread_run_count++;
-@
- LDR r2, [r0, #4] @ Pickup run counter
- LDR r3, [r0, #24] @ Pickup time-slice for this thread
- ADD r2, r2, #1 @ Increment thread run-counter
- STR r2, [r0, #4] @ Store the new run counter
-@
-@ /* Setup time-slice, if present. */
-@ _tx_timer_time_slice = _tx_thread_current_ptr -> tx_thread_time_slice;
-@
- LDR r2, =_tx_timer_time_slice @ Pickup address of time-slice
- @ variable
- LDR sp, [r0, #8] @ Switch stack pointers
- STR r3, [r2] @ Setup time-slice
-@
-@ /* Switch to the thread's stack. */
-@ sp = _tx_thread_execute_ptr -> tx_thread_stack_ptr;
-@
+
+ /* Setup the current thread pointer. */
+
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread
+ STR r0, [r1] // Setup current thread pointer
+
+ /* Increment the run count for this thread. */
+
+ LDR r2, [r0, #4] // Pickup run counter
+ LDR r3, [r0, #24] // Pickup time-slice for this thread
+ ADD r2, r2, #1 // Increment thread run-counter
+ STR r2, [r0, #4] // Store the new run counter
+
+ /* Setup time-slice, if present. */
+
+ LDR r2, =_tx_timer_time_slice // Pickup address of time-slice
+ // variable
+ LDR sp, [r0, #8] // Switch stack pointers
+ STR r3, [r2] // Setup time-slice
+
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread entry function to indicate the thread is executing. */
-@
- MOV r5, r0 @ Save r0
- BL _tx_execution_thread_enter @ Call the thread execution enter function
- MOV r0, r5 @ Restore r0
+
+ /* Call the thread entry function to indicate the thread is executing. */
+
+ MOV r5, r0 // Save r0
+ BL _tx_execution_thread_enter // Call the thread execution enter function
+ MOV r0, r5 // Restore r0
#endif
-@
-@ /* Determine if an interrupt frame or a synchronous task suspension frame
-@ is present. */
-@
- LDMIA sp!, {r4, r5} @ Pickup the stack type and saved CPSR
- CMP r4, #0 @ Check for synchronous context switch
+
+ /* Determine if an interrupt frame or a synchronous task suspension frame
+ is present. */
+
+ LDMIA sp!, {r4, r5} // Pickup the stack type and saved CPSR
+ CMP r4, #0 // Check for synchronous context switch
BEQ _tx_solicited_return
- MSR SPSR_cxsf, r5 @ Setup SPSR for return
+ MSR SPSR_cxsf, r5 // Setup SPSR for return
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_interrupt_vfp_restore @ No, skip VFP interrupt restore
- VLDMIA sp!, {D0-D15} @ Recover D0-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_interrupt_vfp_restore // No, skip VFP interrupt restore
+ VLDMIA sp!, {D0-D15} // Recover D0-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_interrupt_vfp_restore:
#endif
- LDMIA sp!, {r0-r12, lr, pc}^ @ Return to point of thread interrupt
+ LDMIA sp!, {r0-r12, lr, pc}^ // Return to point of thread interrupt
_tx_solicited_return:
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r0, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_restore @ No, skip VFP solicited restore
- VLDMIA sp!, {D8-D15} @ Recover D8-D15
- VLDMIA sp!, {D16-D31} @ Recover D16-D31
- LDR r4, [sp], #4 @ Pickup FPSCR
- VMSR FPSCR, r4 @ Restore FPSCR
+ LDR r1, [r0, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_restore // No, skip VFP solicited restore
+ VLDMIA sp!, {D8-D15} // Recover D8-D15
+ VLDMIA sp!, {D16-D31} // Recover D16-D31
+ LDR r4, [sp], #4 // Pickup FPSCR
+ VMSR FPSCR, r4 // Restore FPSCR
_tx_skip_solicited_vfp_restore:
#endif
- MSR CPSR_cxsf, r5 @ Recover CPSR
- LDMIA sp!, {r4-r11, lr} @ Return to thread synchronously
+ MSR CPSR_cxsf, r5 // Recover CPSR
+ LDMIA sp!, {r4-r11, lr} // Return to thread synchronously
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@
-@}
-@
#ifdef TX_ENABLE_VFP_SUPPORT
.global tx_thread_vfp_enable
.type tx_thread_vfp_enable,function
tx_thread_vfp_enable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_enable @ If NULL, skip VFP enable
- MOV r0, #1 @ Build enable value
- STR r0, [r1, #144] @ Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_enable // If NULL, skip VFP enable
+ MOV r0, #1 // Build enable value
+ STR r0, [r1, #144] // Set the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_enable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
.global tx_thread_vfp_disable
.type tx_thread_vfp_disable,function
tx_thread_vfp_disable:
- MRS r2, CPSR @ Pickup the CPSR
+ MRS r2, CPSR // Pickup the CPSR
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Enable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#else
- CPSID i @ Enable IRQ interrupts
+ CPSID i // Disable IRQ interrupts
#endif
- LDR r0, =_tx_thread_current_ptr @ Build current thread pointer address
- LDR r1, [r0] @ Pickup current thread pointer
- CMP r1, #0 @ Check for NULL thread pointer
- BEQ __tx_no_thread_to_disable @ If NULL, skip VFP disable
- MOV r0, #0 @ Build disable value
- STR r0, [r1, #144] @ Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
+ LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
+ LDR r1, [r0] // Pickup current thread pointer
+ CMP r1, #0 // Check for NULL thread pointer
+ BEQ __tx_no_thread_to_disable // If NULL, skip VFP disable
+ MOV r0, #0 // Build disable value
+ STR r0, [r1, #144] // Clear the VFP enable flag (tx_thread_vfp_enable field in TX_THREAD)
__tx_no_thread_to_disable:
- MSR CPSR_cxsf, r2 @ Recover CPSR
- BX LR @ Return to caller
+ MSR CPSR_cxsf, r2 // Recover CPSR
+ BX LR // Return to caller
#endif
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_stack_build.S b/ports/cortex_a9/gnu/src/tx_thread_stack_build.S
index 5bb2c09f..f413e673 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_stack_build.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_stack_build.S
@@ -1,178 +1,164 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
.arm
-SVC_MODE = 0x13 @ SVC mode
+SVC_MODE = 0x13 // SVC mode
#ifdef TX_ENABLE_FIQ_SUPPORT
-CPSR_MASK = 0xDF @ Mask initial CPSR, IRQ & FIQ interrupts enabled
+CPSR_MASK = 0xDF // Mask initial CPSR, IRQ & FIQ interrupts enabled
#else
-CPSR_MASK = 0x9F @ Mask initial CPSR, IRQ interrupts enabled
+CPSR_MASK = 0x9F // Mask initial CPSR, IRQ interrupts enabled
#endif
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_stack_build for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_thread_stack_build
.type $_tx_thread_stack_build,function
$_tx_thread_stack_build:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_stack_build @ Call _tx_thread_stack_build function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_stack_build // Call _tx_thread_stack_build function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_stack_build Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function builds a stack frame on the supplied thread's stack. */
-@/* The stack frame results in a fake interrupt return to the supplied */
-@/* function pointer. */
-@/* */
-@/* INPUT */
-@/* */
-@/* thread_ptr Pointer to thread control blk */
-@/* function_ptr Pointer to return function */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* _tx_thread_create Create thread service */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_stack_build(TX_THREAD *thread_ptr, VOID (*function_ptr)(VOID))
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_stack_build ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function builds a stack frame on the supplied thread's stack. */
+/* The stack frame results in a fake interrupt return to the supplied */
+/* function pointer. */
+/* */
+/* INPUT */
+/* */
+/* thread_ptr Pointer to thread control blk */
+/* function_ptr Pointer to return function */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* _tx_thread_create Create thread service */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_stack_build
.type _tx_thread_stack_build,function
_tx_thread_stack_build:
-@
-@
-@ /* Build a fake interrupt frame. The form of the fake interrupt stack
-@ on the Cortex-A9 should look like the following after it is built:
-@
-@ Stack Top: 1 Interrupt stack frame type
-@ CPSR Initial value for CPSR
-@ a1 (r0) Initial value for a1
-@ a2 (r1) Initial value for a2
-@ a3 (r2) Initial value for a3
-@ a4 (r3) Initial value for a4
-@ v1 (r4) Initial value for v1
-@ v2 (r5) Initial value for v2
-@ v3 (r6) Initial value for v3
-@ v4 (r7) Initial value for v4
-@ v5 (r8) Initial value for v5
-@ sb (r9) Initial value for sb
-@ sl (r10) Initial value for sl
-@ fp (r11) Initial value for fp
-@ ip (r12) Initial value for ip
-@ lr (r14) Initial value for lr
-@ pc (r15) Initial value for pc
-@ 0 For stack backtracing
-@
-@ Stack Bottom: (higher memory address) */
-@
- LDR r2, [r0, #16] @ Pickup end of stack area
- BIC r2, r2, #7 @ Ensure 8-byte alignment
- SUB r2, r2, #76 @ Allocate space for the stack frame
-@
-@ /* Actually build the stack frame. */
-@
- MOV r3, #1 @ Build interrupt stack type
- STR r3, [r2, #0] @ Store stack type
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #8] @ Store initial r0
- STR r3, [r2, #12] @ Store initial r1
- STR r3, [r2, #16] @ Store initial r2
- STR r3, [r2, #20] @ Store initial r3
- STR r3, [r2, #24] @ Store initial r4
- STR r3, [r2, #28] @ Store initial r5
- STR r3, [r2, #32] @ Store initial r6
- STR r3, [r2, #36] @ Store initial r7
- STR r3, [r2, #40] @ Store initial r8
- STR r3, [r2, #44] @ Store initial r9
- LDR r3, [r0, #12] @ Pickup stack starting address
- STR r3, [r2, #48] @ Store initial r10 (sl)
- LDR r3,=_tx_thread_schedule @ Pickup address of _tx_thread_schedule for GDB backtrace
- STR r3, [r2, #60] @ Store initial r14 (lr)
- MOV r3, #0 @ Build initial register value
- STR r3, [r2, #52] @ Store initial r11
- STR r3, [r2, #56] @ Store initial r12
- STR r1, [r2, #64] @ Store initial pc
- STR r3, [r2, #68] @ 0 for back-trace
- MRS r1, CPSR @ Pickup CPSR
- BIC r1, r1, #CPSR_MASK @ Mask mode bits of CPSR
- ORR r3, r1, #SVC_MODE @ Build CPSR, SVC mode, interrupts enabled
- STR r3, [r2, #4] @ Store initial CPSR
-@
-@ /* Setup stack pointer. */
-@ thread_ptr -> tx_thread_stack_ptr = r2;
-@
- STR r2, [r0, #8] @ Save stack pointer in thread's
- @ control block
+
+
+ /* Build a fake interrupt frame. The form of the fake interrupt stack
+ on the ARMv7-A should look like the following after it is built:
+
+ Stack Top: 1 Interrupt stack frame type
+ CPSR Initial value for CPSR
+ a1 (r0) Initial value for a1
+ a2 (r1) Initial value for a2
+ a3 (r2) Initial value for a3
+ a4 (r3) Initial value for a4
+ v1 (r4) Initial value for v1
+ v2 (r5) Initial value for v2
+ v3 (r6) Initial value for v3
+ v4 (r7) Initial value for v4
+ v5 (r8) Initial value for v5
+ sb (r9) Initial value for sb
+ sl (r10) Initial value for sl
+ fp (r11) Initial value for fp
+ ip (r12) Initial value for ip
+ lr (r14) Initial value for lr
+ pc (r15) Initial value for pc
+ 0 For stack backtracing
+
+ Stack Bottom: (higher memory address) */
+
+ LDR r2, [r0, #16] // Pickup end of stack area
+ BIC r2, r2, #7 // Ensure 8-byte alignment
+ SUB r2, r2, #76 // Allocate space for the stack frame
+
+ /* Actually build the stack frame. */
+
+ MOV r3, #1 // Build interrupt stack type
+ STR r3, [r2, #0] // Store stack type
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #8] // Store initial r0
+ STR r3, [r2, #12] // Store initial r1
+ STR r3, [r2, #16] // Store initial r2
+ STR r3, [r2, #20] // Store initial r3
+ STR r3, [r2, #24] // Store initial r4
+ STR r3, [r2, #28] // Store initial r5
+ STR r3, [r2, #32] // Store initial r6
+ STR r3, [r2, #36] // Store initial r7
+ STR r3, [r2, #40] // Store initial r8
+ STR r3, [r2, #44] // Store initial r9
+ LDR r3, [r0, #12] // Pickup stack starting address
+ STR r3, [r2, #48] // Store initial r10 (sl)
+ LDR r3,=_tx_thread_schedule // Pickup address of _tx_thread_schedule for GDB backtrace
+ STR r3, [r2, #60] // Store initial r14 (lr)
+ MOV r3, #0 // Build initial register value
+ STR r3, [r2, #52] // Store initial r11
+ STR r3, [r2, #56] // Store initial r12
+ STR r1, [r2, #64] // Store initial pc
+ STR r3, [r2, #68] // 0 for back-trace
+ MRS r1, CPSR // Pickup CPSR
+ BIC r1, r1, #CPSR_MASK // Mask mode bits of CPSR
+ ORR r3, r1, #SVC_MODE // Build CPSR, SVC mode, interrupts enabled
+ STR r3, [r2, #4] // Store initial CPSR
+
+ /* Setup stack pointer. */
+
+ STR r2, [r0, #8] // Save stack pointer in thread's
+ // control block
#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
+ BX lr // Return to caller
#else
- MOV pc, lr @ Return to caller
+ MOV pc, lr // Return to caller
#endif
-@}
-
-
diff --git a/ports/cortex_a9/gnu/src/tx_thread_system_return.S b/ports/cortex_a9/gnu/src/tx_thread_system_return.S
index f495d445..cb7d62ce 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_system_return.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_system_return.S
@@ -1,183 +1,162 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@#include "tx_timer.h"
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@
+
+
.global _tx_thread_current_ptr
.global _tx_timer_time_slice
.global _tx_thread_schedule
- .global _tx_execution_thread_exit
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_thread_system_return for
+ applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.global $_tx_thread_system_return
.type $_tx_thread_system_return,function
$_tx_thread_system_return:
.thumb
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_thread_system_return @ Call _tx_thread_system_return function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_thread_system_return // Call _tx_thread_system_return function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_system_return Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function is target processor specific. It is used to transfer */
-@/* control from a thread back to the ThreadX system. Only a */
-@/* minimal context is saved since the compiler assumes temp registers */
-@/* are going to get slicked by a function call anyway. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_schedule Thread scheduling loop */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ThreadX components */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_system_return(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_system_return ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is target processor specific. It is used to transfer */
+/* control from a thread back to the ThreadX system. Only a */
+/* minimal context is saved since the compiler assumes temp registers */
+/* are going to get slicked by a function call anyway. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_schedule Thread scheduling loop */
+/* */
+/* CALLED BY */
+/* */
+/* ThreadX components */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_system_return
.type _tx_thread_system_return,function
_tx_thread_system_return:
-@
-@ /* Save minimal context on the stack. */
-@
- STMDB sp!, {r4-r11, lr} @ Save minimal context
- LDR r4, =_tx_thread_current_ptr @ Pickup address of current ptr
- LDR r5, [r4] @ Pickup current thread pointer
-
+ /* Save minimal context on the stack. */
+
+ STMDB sp!, {r4-r11, lr} // Save minimal context
+
+ LDR r4, =_tx_thread_current_ptr // Pickup address of current ptr
+ LDR r5, [r4] // Pickup current thread pointer
+
#ifdef TX_ENABLE_VFP_SUPPORT
- LDR r1, [r5, #144] @ Pickup the VFP enabled flag
- CMP r1, #0 @ Is the VFP enabled?
- BEQ _tx_skip_solicited_vfp_save @ No, skip VFP solicited save
- VMRS r1, FPSCR @ Pickup the FPSCR
- STR r1, [sp, #-4]! @ Save FPSCR
- VSTMDB sp!, {D16-D31} @ Save D16-D31
- VSTMDB sp!, {D8-D15} @ Save D8-D15
+ LDR r1, [r5, #144] // Pickup the VFP enabled flag
+ CMP r1, #0 // Is the VFP enabled?
+ BEQ _tx_skip_solicited_vfp_save // No, skip VFP solicited save
+ VMRS r1, FPSCR // Pickup the FPSCR
+ STR r1, [sp, #-4]! // Save FPSCR
+ VSTMDB sp!, {D16-D31} // Save D16-D31
+ VSTMDB sp!, {D8-D15} // Save D8-D15
_tx_skip_solicited_vfp_save:
#endif
- MOV r0, #0 @ Build a solicited stack type
- MRS r1, CPSR @ Pickup the CPSR
- STMDB sp!, {r0-r1} @ Save type and CPSR
-@
-@ /* Lockout interrupts. */
-@
-#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
-#else
- CPSID i @ Disable IRQ interrupts
-#endif
-
-#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the thread exit function to indicate the thread is no longer executing. */
-@
- BL _tx_execution_thread_exit @ Call the thread exit function
-#endif
- MOV r3, r4 @ Pickup address of current ptr
- MOV r0, r5 @ Pickup current thread pointer
- LDR r2, =_tx_timer_time_slice @ Pickup address of time slice
- LDR r1, [r2] @ Pickup current time slice
-@
-@ /* Save current stack and switch to system stack. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@ sp = _tx_thread_system_stack_ptr;
-@
- STR sp, [r0, #8] @ Save thread stack pointer
-@
-@ /* Determine if the time-slice is active. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- MOV r4, #0 @ Build clear value
- CMP r1, #0 @ Is a time-slice active?
- BEQ __tx_thread_dont_save_ts @ No, don't save the time-slice
-@
-@ /* Save time-slice for the thread and clear the current time-slice. */
-@ _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
-@ _tx_timer_time_slice = 0;
-@
- STR r4, [r2] @ Clear time-slice
- STR r1, [r0, #24] @ Save current time-slice
-@
-@ }
-__tx_thread_dont_save_ts:
-@
-@ /* Clear the current thread pointer. */
-@ _tx_thread_current_ptr = TX_NULL;
-@
- STR r4, [r3] @ Clear current thread pointer
- B _tx_thread_schedule @ Jump to scheduler!
-@
-@}
+ MOV r0, #0 // Build a solicited stack type
+ MRS r1, CPSR // Pickup the CPSR
+ STMDB sp!, {r0-r1} // Save type and CPSR
+ /* Lockout interrupts. */
+
+#ifdef TX_ENABLE_FIQ_SUPPORT
+ CPSID if // Disable IRQ and FIQ interrupts
+#else
+ CPSID i // Disable IRQ interrupts
+#endif
+
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
+
+ /* Call the thread exit function to indicate the thread is no longer executing. */
+
+ BL _tx_execution_thread_exit // Call the thread exit function
+#endif
+ MOV r3, r4 // Pickup address of current ptr
+ MOV r0, r5 // Pickup current thread pointer
+ LDR r2, =_tx_timer_time_slice // Pickup address of time slice
+ LDR r1, [r2] // Pickup current time slice
+
+ /* Save current stack and switch to system stack. */
+
+ STR sp, [r0, #8] // Save thread stack pointer
+
+ /* Determine if the time-slice is active. */
+
+ MOV r4, #0 // Build clear value
+ CMP r1, #0 // Is a time-slice active?
+ BEQ __tx_thread_dont_save_ts // No, don't save the time-slice
+
+ /* Save time-slice for the thread and clear the current time-slice. */
+
+ STR r4, [r2] // Clear time-slice
+ STR r1, [r0, #24] // Save current time-slice
+
+__tx_thread_dont_save_ts:
+
+ /* Clear the current thread pointer. */
+
+ STR r4, [r3] // Clear current thread pointer
+ B _tx_thread_schedule // Jump to scheduler!
diff --git a/ports/cortex_a9/gnu/src/tx_thread_vectored_context_save.S b/ports/cortex_a9/gnu/src/tx_thread_vectored_context_save.S
index f3b7c7a8..d846223f 100644
--- a/ports/cortex_a9/gnu/src/tx_thread_vectored_context_save.S
+++ b/ports/cortex_a9/gnu/src/tx_thread_vectored_context_save.S
@@ -1,193 +1,165 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Thread */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Thread */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.global _tx_thread_system_state
.global _tx_thread_current_ptr
.global _tx_execution_isr_enter
-@
-@
-@
-@/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
-@ since it will never be called 16-bit mode. */
-@
+
+
+
+/* No 16-bit Thumb mode veneer code is needed for _tx_thread_vectored_context_save
+ since it will never be called in 16-bit mode. */
+
.arm
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_thread_vectored_context_save Cortex-A9/GNU */
-@/* 6.1.9 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function saves the context of an executing thread in the */
-@/* beginning of interrupt processing. The function also ensures that */
-@/* the system stack is used upon return to the calling ISR. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* None */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* ISRs */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* 10-15-2021 William E. Lamie Modified comment(s), added */
-@/* execution profile support, */
-@/* resulting in version 6.1.9 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_thread_vectored_context_save(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_thread_vectored_context_save ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function saves the context of an executing thread in the */
+/* beginning of interrupt processing. The function also ensures that */
+/* the system stack is used upon return to the calling ISR. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/* ISRs */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 William E. Lamie Modified comment(s), added */
+/* execution profile support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_thread_vectored_context_save
.type _tx_thread_vectored_context_save,function
_tx_thread_vectored_context_save:
-@
-@ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
-@ out, we are in IRQ mode, and all registers are intact. */
-@
-@ /* Check for a nested interrupt condition. */
-@ if (_tx_thread_system_state++)
-@ {
-@
+
+ /* Upon entry to this routine, it is assumed that IRQ interrupts are locked
+ out, we are in IRQ mode, and all registers are intact. */
+
+ /* Check for a nested interrupt condition. */
+
#ifdef TX_ENABLE_FIQ_SUPPORT
- CPSID if @ Disable IRQ and FIQ interrupts
+ CPSID if // Disable IRQ and FIQ interrupts
#endif
- LDR r3, =_tx_thread_system_state @ Pickup address of system state variable
- LDR r2, [r3, #0] @ Pickup system state
- CMP r2, #0 @ Is this the first interrupt?
- BEQ __tx_thread_not_nested_save @ Yes, not a nested context save
-@
-@ /* Nested interrupt condition. */
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Return to the ISR. */
-@
- MOV r10, #0 @ Clear stack limit
+ LDR r3, =_tx_thread_system_state // Pickup address of system state variable
+ LDR r2, [r3, #0] // Pickup system state
+ CMP r2, #0 // Is this the first interrupt?
+ BEQ __tx_thread_not_nested_save // Yes, not a nested context save
+
+ /* Nested interrupt condition. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Return to the ISR. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_not_nested_save:
-@ }
-@
-@ /* Otherwise, not nested, check to see if a thread was running. */
-@ else if (_tx_thread_current_ptr)
-@ {
-@
- ADD r2, r2, #1 @ Increment the interrupt counter
- STR r2, [r3, #0] @ Store it back in the variable
- LDR r1, =_tx_thread_current_ptr @ Pickup address of current thread ptr
- LDR r0, [r1, #0] @ Pickup current thread pointer
- CMP r0, #0 @ Is it NULL?
- BEQ __tx_thread_idle_system_save @ If so, interrupt occurred in
- @ scheduling loop - nothing needs saving!
-@
-@ /* Note: Minimal context of interrupted thread is already saved. */
-@
-@ /* Save the current stack pointer in the thread's control block. */
-@ _tx_thread_current_ptr -> tx_thread_stack_ptr = sp;
-@
-@ /* Switch to the system stack. */
-@ sp = _tx_thread_system_stack_ptr;
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Otherwise, not nested, check to see if a thread was running. */
+
+ ADD r2, r2, #1 // Increment the interrupt counter
+ STR r2, [r3, #0] // Store it back in the variable
+ LDR r1, =_tx_thread_current_ptr // Pickup address of current thread ptr
+ LDR r0, [r1, #0] // Pickup current thread pointer
+ CMP r0, #0 // Is it NULL?
+ BEQ __tx_thread_idle_system_save // If so, interrupt occurred in
+ // scheduling loop - nothing needs saving!
+ /* Note: Minimal context of interrupted thread is already saved. */
+
+ /* Save the current stack pointer in the thread's control block. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- MOV pc, lr @ Return to caller
-@
-@ }
-@ else
-@ {
-@
+ MOV pc, lr // Return to caller
+
__tx_thread_idle_system_save:
-@
-@ /* Interrupt occurred in the scheduling loop. */
-@
-@ /* Not much to do here, just adjust the stack pointer, and return to IRQ
-@ processing. */
-@
- MOV r10, #0 @ Clear stack limit
+
+ /* Interrupt occurred in the scheduling loop. */
+
+ /* Not much to do here, just adjust the stack pointer, and return to IRQ
+ processing. */
+
+ MOV r10, #0 // Clear stack limit
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
-@
-@ /* Call the ISR enter function to indicate an ISR is executing. */
-@
- PUSH {lr} @ Save ISR lr
- BL _tx_execution_isr_enter @ Call the ISR enter function
- POP {lr} @ Recover ISR lr
+
+ /* Call the ISR enter function to indicate an ISR is executing. */
+
+ PUSH {lr} // Save ISR lr
+ BL _tx_execution_isr_enter // Call the ISR enter function
+ POP {lr} // Recover ISR lr
#endif
- ADD sp, sp, #32 @ Recover saved registers
- MOV pc, lr @ Return to caller
-@
-@ }
-@}
-
+ ADD sp, sp, #32 // Recover saved registers
+ MOV pc, lr // Return to caller
diff --git a/ports/cortex_a9/gnu/src/tx_timer_interrupt.S b/ports/cortex_a9/gnu/src/tx_timer_interrupt.S
index 92365206..7337ed0c 100644
--- a/ports/cortex_a9/gnu/src/tx_timer_interrupt.S
+++ b/ports/cortex_a9/gnu/src/tx_timer_interrupt.S
@@ -1,40 +1,30 @@
-@/**************************************************************************/
-@/* */
-@/* Copyright (c) Microsoft Corporation. All rights reserved. */
-@/* */
-@/* This software is licensed under the Microsoft Software License */
-@/* Terms for Microsoft Azure RTOS. Full text of the license can be */
-@/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
-@/* and in the root directory of this software. */
-@/* */
-@/**************************************************************************/
-@
-@
-@/**************************************************************************/
-@/**************************************************************************/
-@/** */
-@/** ThreadX Component */
-@/** */
-@/** Timer */
-@/** */
-@/**************************************************************************/
-@/**************************************************************************/
-@
-@#define TX_SOURCE_CODE
-@
-@
-@/* Include necessary system files. */
-@
-@#include "tx_api.h"
-@#include "tx_timer.h"
-@#include "tx_thread.h"
-@
-@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** Timer */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
.arm
-@
-@/* Define Assembly language external references... */
-@
+
+/* Define Assembly language external references... */
+
.global _tx_timer_time_slice
.global _tx_timer_system_clock
.global _tx_timer_current_ptr
@@ -43,237 +33,199 @@
.global _tx_timer_expired_time_slice
.global _tx_timer_expired
.global _tx_thread_time_slice
-@
-@
-@
-@/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
-@ applications calling this function from to 16-bit Thumb mode. */
-@
+
+
+
+/* Define the 16-bit Thumb mode veneer for _tx_timer_interrupt for
+   applications calling this function from 16-bit Thumb mode. */
+
.text
.align 2
.thumb
.global $_tx_timer_interrupt
.type $_tx_timer_interrupt,function
$_tx_timer_interrupt:
- BX pc @ Switch to 32-bit mode
- NOP @
+ BX pc // Switch to 32-bit mode
+ NOP //
.arm
- STMFD sp!, {lr} @ Save return address
- BL _tx_timer_interrupt @ Call _tx_timer_interrupt function
- LDMFD sp!, {lr} @ Recover saved return address
- BX lr @ Return to 16-bit caller
-@
-@
+ STMFD sp!, {lr} // Save return address
+ BL _tx_timer_interrupt // Call _tx_timer_interrupt function
+ LDMFD sp!, {lr} // Recover saved return address
+ BX lr // Return to 16-bit caller
+
+
.text
.align 2
-@/**************************************************************************/
-@/* */
-@/* FUNCTION RELEASE */
-@/* */
-@/* _tx_timer_interrupt Cortex-A9/GNU */
-@/* 6.1 */
-@/* AUTHOR */
-@/* */
-@/* William E. Lamie, Microsoft Corporation */
-@/* */
-@/* DESCRIPTION */
-@/* */
-@/* This function processes the hardware timer interrupt. This */
-@/* processing includes incrementing the system clock and checking for */
-@/* time slice and/or timer expiration. If either is found, the */
-@/* interrupt context save/restore functions are called along with the */
-@/* expiration functions. */
-@/* */
-@/* INPUT */
-@/* */
-@/* None */
-@/* */
-@/* OUTPUT */
-@/* */
-@/* None */
-@/* */
-@/* CALLS */
-@/* */
-@/* _tx_thread_time_slice Time slice interrupted thread */
-@/* _tx_timer_expiration_process Timer expiration processing */
-@/* */
-@/* CALLED BY */
-@/* */
-@/* interrupt vector */
-@/* */
-@/* RELEASE HISTORY */
-@/* */
-@/* DATE NAME DESCRIPTION */
-@/* */
-@/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-@/* */
-@/**************************************************************************/
-@VOID _tx_timer_interrupt(VOID)
-@{
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_timer_interrupt ARMv7-A */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* William E. Lamie, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function processes the hardware timer interrupt. This */
+/* processing includes incrementing the system clock and checking for */
+/* time slice and/or timer expiration. If either is found, the */
+/* interrupt context save/restore functions are called along with the */
+/* expiration functions. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* _tx_thread_time_slice Time slice interrupted thread */
+/* _tx_timer_expiration_process Timer expiration processing */
+/* */
+/* CALLED BY */
+/* */
+/* interrupt vector */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-25-2022 Zhen Kong Updated comments, */
+/* resulting in version 6.1.11 */
+/* */
+/**************************************************************************/
.global _tx_timer_interrupt
.type _tx_timer_interrupt,function
_tx_timer_interrupt:
-@
-@ /* Upon entry to this routine, it is assumed that context save has already
-@ been called, and therefore the compiler scratch registers are available
-@ for use. */
-@
-@ /* Increment the system clock. */
-@ _tx_timer_system_clock++;
-@
- LDR r1, =_tx_timer_system_clock @ Pickup address of system clock
- LDR r0, [r1] @ Pickup system clock
- ADD r0, r0, #1 @ Increment system clock
- STR r0, [r1] @ Store new system clock
-@
-@ /* Test for time-slice expiration. */
-@ if (_tx_timer_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_time_slice @ Pickup address of time-slice
- LDR r2, [r3] @ Pickup time-slice
- CMP r2, #0 @ Is it non-active?
- BEQ __tx_timer_no_time_slice @ Yes, skip time-slice processing
-@
-@ /* Decrement the time_slice. */
-@ _tx_timer_time_slice--;
-@
- SUB r2, r2, #1 @ Decrement the time-slice
- STR r2, [r3] @ Store new time-slice value
-@
-@ /* Check for expiration. */
-@ if (__tx_timer_time_slice == 0)
-@
- CMP r2, #0 @ Has it expired?
- BNE __tx_timer_no_time_slice @ No, skip expiration processing
-@
-@ /* Set the time-slice expired flag. */
-@ _tx_timer_expired_time_slice = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- MOV r0, #1 @ Build expired value
- STR r0, [r3] @ Set time-slice expiration flag
-@
-@ }
-@
-__tx_timer_no_time_slice:
-@
-@ /* Test for timer expiration. */
-@ if (*_tx_timer_current_ptr)
-@ {
-@
- LDR r1, =_tx_timer_current_ptr @ Pickup current timer pointer address
- LDR r0, [r1] @ Pickup current timer
- LDR r2, [r0] @ Pickup timer list entry
- CMP r2, #0 @ Is there anything in the list?
- BEQ __tx_timer_no_timer @ No, just increment the timer
-@
-@ /* Set expiration flag. */
-@ _tx_timer_expired = TX_TRUE;
-@
- LDR r3, =_tx_timer_expired @ Pickup expiration flag address
- MOV r2, #1 @ Build expired value
- STR r2, [r3] @ Set expired flag
- B __tx_timer_done @ Finished timer processing
-@
-@ }
-@ else
-@ {
-__tx_timer_no_timer:
-@
-@ /* No timer expired, increment the timer pointer. */
-@ _tx_timer_current_ptr++;
-@
- ADD r0, r0, #4 @ Move to next timer
-@
-@ /* Check for wraparound. */
-@ if (_tx_timer_current_ptr == _tx_timer_list_end)
-@
- LDR r3, =_tx_timer_list_end @ Pickup address of timer list end
- LDR r2, [r3] @ Pickup list end
- CMP r0, r2 @ Are we at list end?
- BNE __tx_timer_skip_wrap @ No, skip wraparound logic
-@
-@ /* Wrap to beginning of list. */
-@ _tx_timer_current_ptr = _tx_timer_list_start;
-@
- LDR r3, =_tx_timer_list_start @ Pickup address of timer list start
- LDR r0, [r3] @ Set current pointer to list start
-@
-__tx_timer_skip_wrap:
-@
- STR r0, [r1] @ Store new current timer pointer
-@ }
-@
-__tx_timer_done:
-@
-@
-@ /* See if anything has expired. */
-@ if ((_tx_timer_expired_time_slice) || (_tx_timer_expired))
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of expired flag
- LDR r2, [r3] @ Pickup time-slice expired flag
- CMP r2, #0 @ Did a time-slice expire?
- BNE __tx_something_expired @ If non-zero, time-slice expired
- LDR r1, =_tx_timer_expired @ Pickup address of other expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Did a timer expire?
- BEQ __tx_timer_nothing_expired @ No, nothing expired
-@
-__tx_something_expired:
-@
-@
- STMDB sp!, {r0, lr} @ Save the lr register on the stack
- @ and save r0 just to keep 8-byte alignment
-@
-@ /* Did a timer expire? */
-@ if (_tx_timer_expired)
-@ {
-@
- LDR r1, =_tx_timer_expired @ Pickup address of expired flag
- LDR r0, [r1] @ Pickup timer expired flag
- CMP r0, #0 @ Check for timer expiration
- BEQ __tx_timer_dont_activate @ If not set, skip timer activation
-@
-@ /* Process timer expiration. */
-@ _tx_timer_expiration_process();
-@
- BL _tx_timer_expiration_process @ Call the timer expiration handling routine
-@
-@ }
-__tx_timer_dont_activate:
-@
-@ /* Did time slice expire? */
-@ if (_tx_timer_expired_time_slice)
-@ {
-@
- LDR r3, =_tx_timer_expired_time_slice @ Pickup address of time-slice expired
- LDR r2, [r3] @ Pickup the actual flag
- CMP r2, #0 @ See if the flag is set
- BEQ __tx_timer_not_ts_expiration @ No, skip time-slice processing
-@
-@ /* Time slice interrupted thread. */
-@ _tx_thread_time_slice();
-@
- BL _tx_thread_time_slice @ Call time-slice processing
-@
-@ }
-@
-__tx_timer_not_ts_expiration:
-@
- LDMIA sp!, {r0, lr} @ Recover lr register (r0 is just there for
- @ the 8-byte stack alignment
-@
-@ }
-@
-__tx_timer_nothing_expired:
-@
-#ifdef __THUMB_INTERWORK
- BX lr @ Return to caller
-#else
- MOV pc, lr @ Return to caller
-#endif
-@
-@}
+ /* Upon entry to this routine, it is assumed that context save has already
+ been called, and therefore the compiler scratch registers are available
+ for use. */
+
+ /* Increment the system clock. */
+
+ LDR r1, =_tx_timer_system_clock // Pickup address of system clock
+ LDR r0, [r1] // Pickup system clock
+ ADD r0, r0, #1 // Increment system clock
+ STR r0, [r1] // Store new system clock
+
+ /* Test for time-slice expiration. */
+
+ LDR r3, =_tx_timer_time_slice // Pickup address of time-slice
+ LDR r2, [r3] // Pickup time-slice
+ CMP r2, #0 // Is it non-active?
+ BEQ __tx_timer_no_time_slice // Yes, skip time-slice processing
+
+ /* Decrement the time_slice. */
+
+ SUB r2, r2, #1 // Decrement the time-slice
+ STR r2, [r3] // Store new time-slice value
+
+ /* Check for expiration. */
+
+ CMP r2, #0 // Has it expired?
+ BNE __tx_timer_no_time_slice // No, skip expiration processing
+
+ /* Set the time-slice expired flag. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ MOV r0, #1 // Build expired value
+ STR r0, [r3] // Set time-slice expiration flag
+
+__tx_timer_no_time_slice:
+
+ /* Test for timer expiration. */
+
+ LDR r1, =_tx_timer_current_ptr // Pickup current timer pointer address
+ LDR r0, [r1] // Pickup current timer
+ LDR r2, [r0] // Pickup timer list entry
+ CMP r2, #0 // Is there anything in the list?
+ BEQ __tx_timer_no_timer // No, just increment the timer
+
+ /* Set expiration flag. */
+
+ LDR r3, =_tx_timer_expired // Pickup expiration flag address
+ MOV r2, #1 // Build expired value
+ STR r2, [r3] // Set expired flag
+ B __tx_timer_done // Finished timer processing
+
+__tx_timer_no_timer:
+
+ /* No timer expired, increment the timer pointer. */
+ ADD r0, r0, #4 // Move to next timer
+
+ /* Check for wraparound. */
+
+ LDR r3, =_tx_timer_list_end // Pickup address of timer list end
+ LDR r2, [r3] // Pickup list end
+ CMP r0, r2 // Are we at list end?
+ BNE __tx_timer_skip_wrap // No, skip wraparound logic
+
+ /* Wrap to beginning of list. */
+
+ LDR r3, =_tx_timer_list_start // Pickup address of timer list start
+ LDR r0, [r3] // Set current pointer to list start
+
+__tx_timer_skip_wrap:
+
+ STR r0, [r1] // Store new current timer pointer
+
+__tx_timer_done:
+
+ /* See if anything has expired. */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of expired flag
+ LDR r2, [r3] // Pickup time-slice expired flag
+ CMP r2, #0 // Did a time-slice expire?
+ BNE __tx_something_expired // If non-zero, time-slice expired
+ LDR r1, =_tx_timer_expired // Pickup address of other expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Did a timer expire?
+ BEQ __tx_timer_nothing_expired // No, nothing expired
+
+__tx_something_expired:
+
+ STMDB sp!, {r0, lr} // Save the lr register on the stack
+ // and save r0 just to keep 8-byte alignment
+
+ /* Did a timer expire? */
+
+ LDR r1, =_tx_timer_expired // Pickup address of expired flag
+ LDR r0, [r1] // Pickup timer expired flag
+ CMP r0, #0 // Check for timer expiration
+ BEQ __tx_timer_dont_activate // If not set, skip timer activation
+
+ /* Process timer expiration. */
+ BL _tx_timer_expiration_process // Call the timer expiration handling routine
+
+__tx_timer_dont_activate:
+
+ /* Did time slice expire? */
+
+ LDR r3, =_tx_timer_expired_time_slice // Pickup address of time-slice expired
+ LDR r2, [r3] // Pickup the actual flag
+ CMP r2, #0 // See if the flag is set
+ BEQ __tx_timer_not_ts_expiration // No, skip time-slice processing
+
+ /* Time slice interrupted thread. */
+
+ BL _tx_thread_time_slice // Call time-slice processing
+
+__tx_timer_not_ts_expiration:
+
+ LDMIA sp!, {r0, lr} // Recover lr register (r0 is just there for
+ // the 8-byte stack alignment
+
+__tx_timer_nothing_expired:
+
+#ifdef __THUMB_INTERWORK
+ BX lr // Return to caller
+#else
+ MOV pc, lr // Return to caller
+#endif
diff --git a/ports/cortex_m0/ac5/inc/tx_port.h b/ports/cortex_m0/ac5/inc/tx_port.h
index 048999e4..7dfaa192 100644
--- a/ports/cortex_m0/ac5/inc/tx_port.h
+++ b/ports/cortex_m0/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0/AC5 */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -51,6 +51,9 @@
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -126,14 +129,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -304,7 +307,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
was_masked = __disable_irq();
@@ -321,7 +324,7 @@ unsigned int was_masked;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m0/ac6/inc/tx_port.h b/ports/cortex_m0/ac6/inc/tx_port.h
index c96e16ed..7492d49c 100644
--- a/ports/cortex_m0/ac6/inc/tx_port.h
+++ b/ports/cortex_m0/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0/AC6 */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -51,6 +51,9 @@
/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -143,19 +146,20 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
+
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
@@ -328,7 +332,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -362,7 +367,7 @@ unsigned int interrupt_save;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC6 Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_m0/gnu/inc/tx_port.h b/ports/cortex_m0/gnu/inc/tx_port.h
index cc17069d..c5e2c447 100644
--- a/ports/cortex_m0/gnu/inc/tx_port.h
+++ b/ports/cortex_m0/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0/GNU */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -47,12 +47,15 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 05-19-2020 William E. Lamie Initial Version 6.0 */
-/* 09-30-2020 William E. Lamie Modified comment(s), */
+/* 05-19-2020 William E. Lamie Initial Version 6.0 */
+/* 09-30-2020 William E. Lamie Modified comment(s), */
/* resulting in version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -145,19 +148,20 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
#define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
#endif
+
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
#endif
@@ -330,7 +334,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -364,7 +369,7 @@ unsigned int interrupt_save;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/GNU Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports/cortex_m0/iar/inc/tx_port.h b/ports/cortex_m0/iar/inc/tx_port.h
index 534fd8c1..d78f7e74 100644
--- a/ports/cortex_m0/iar/inc/tx_port.h
+++ b/ports/cortex_m0/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0/IAR */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -47,10 +47,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -130,14 +133,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -321,7 +324,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
@@ -347,7 +350,7 @@ __istate_t interrupt_save;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/IAR Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m0/keil/inc/tx_port.h b/ports/cortex_m0/keil/inc/tx_port.h
index 504edd41..8f0699ab 100644
--- a/ports/cortex_m0/keil/inc/tx_port.h
+++ b/ports/cortex_m0/keil/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0/AC5 */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -47,10 +47,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 04-02-2021 Bhupendra Naphade Modified comment(s),updated */
/* macro definition, */
/* resulting in version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -126,14 +129,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -304,7 +307,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
was_masked = __disable_irq();
@@ -321,7 +324,7 @@ unsigned int was_masked;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M0/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m23/ac6/inc/tx_port.h b/ports/cortex_m23/ac6/inc/tx_port.h
index 51010daf..ccde44e9 100644
--- a/ports/cortex_m23/ac6/inc/tx_port.h
+++ b/ports/cortex_m23/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M23/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,9 +56,12 @@
/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -175,14 +178,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -392,7 +395,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
{
was_masked = __disable_irq();
@@ -408,7 +411,7 @@ unsigned int was_masked;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/AC6 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m23/gnu/inc/tx_port.h b/ports/cortex_m23/gnu/inc/tx_port.h
index 6c3b424f..db5935cb 100644
--- a/ports/cortex_m23/gnu/inc/tx_port.h
+++ b/ports/cortex_m23/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M23/GNU */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -57,9 +57,12 @@
/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -175,14 +178,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -405,7 +408,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
{
interrupt_save = __get_primask_value();
@@ -439,7 +443,7 @@ unsigned int interrupt_save;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/GNU Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m23/iar/inc/tx_port.h b/ports/cortex_m23/iar/inc/tx_port.h
index 602e73e9..73a54080 100644
--- a/ports/cortex_m23/iar/inc/tx_port.h
+++ b/ports/cortex_m23/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M23/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -55,9 +55,12 @@
/* conditional compilation */
/* for ARMv8-M (Cortex M23/33) */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -178,14 +181,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -414,7 +417,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
@@ -430,7 +433,7 @@ __istate_t interrupt_save;
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/IAR Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m3/ac5/inc/tx_port.h b/ports/cortex_m3/ac5/inc/tx_port.h
index 6e75da84..e29c80c8 100644
--- a/ports/cortex_m3/ac5/inc/tx_port.h
+++ b/ports/cortex_m3/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/AC5 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m3/ac5/src/tx_thread_schedule.s b/ports/cortex_m3/ac5/src/tx_thread_schedule.s
index f2b1d103..7646d41d 100644
--- a/ports/cortex_m3/ac5/src/tx_thread_schedule.s
+++ b/ports/cortex_m3/ac5/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/AC5 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m3/ac6/inc/tx_port.h b/ports/cortex_m3/ac6/inc/tx_port.h
index eb4cb176..eedf0164 100644
--- a/ports/cortex_m3/ac6/inc/tx_port.h
+++ b/ports/cortex_m3/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC6 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m3/ac6/src/tx_misra.S b/ports/cortex_m3/ac6/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m3/ac6/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR}
+ MOVS R4,R0
+ MOVS R0,R2
+ MOVS R2,R1
+ MOVS R1,R0
+ MOVS R0,R4
+ BL __aeabi_memset
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3
+ MULS R1,R2,R1
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5}
+ LDR R3,[R0, #+0]
+ LDR R4,[R1, #+0]
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ CMP R2,#+2
+ BCC.N _tx_misra_message_copy_0
+ SUBS R2,R2,#+1
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0]
+ STR R4,[R1, #+0]
+ POP {R4,R5}
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8
+ SUBS R2,R2,R0
+ RSBS R2,R2,#+0
+ ADD R0,R0,R2
+ STR R0,[R1, #+0]
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR}
+ MOVS R4,R0
+ MOVS R5,R1
+ BL _tx_thread_interrupt_disable
+ CMP R4,#+0
+ BEQ.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+0]
+ LDR R2,=0x54485244
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+8]
+ LDR R2,[R5, #+0]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1
+ LDR R1,[R4, #+8]
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12]
+ LDR R1,[R1, #+0]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R4, #+16]
+ LDR R1,[R1, #+1]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R5, #+0]
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0]
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145
+ BEQ.N _tx_misra_thread_stack_check_0
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore
+ POP {R0,R4,R5,PC} // return
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR}
+ LDR.N R4,DataTable2_1
+ LDR R4,[R4, #+0]
+ CMP R4,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_2
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28]
+ TST R5,R6
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_3
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4
+ LDR R6,[R6, #+0]
+ CMP R5,#+0
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44]
+ LDR R7,[R6, #+60]
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6
+ MOVS R6,#-1
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0]
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8]
+ LDR R0,[SP, #+32]
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16]
+ STR R2,[R4, #+20]
+ STR R3,[R4, #+24]
+ LDR R0,[SP, #+24]
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32
+ LDR.N R0,DataTable2_5
+ LDR R0,[R0, #+0]
+ CMP R4,R0
+ BCC.N _tx_misra_trace_event_insert_4
+ LDR.N R0,DataTable2_6
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/***********************************************************************************/
+/***********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************/
+/************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************/
+/************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************/
+/**************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************/
+/**************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************************/
+/****************************************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/****************************************************************************************************/
+/****************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************************/
+/**************************************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************************/
+/**************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************/
+/****************************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/****************************************************************************************/
+/****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return
+
+
+/*********************************************************************************/
+/*********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/*********************************************************************************/
+/*********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************************/
+/***************************************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/***************************************************************************************************/
+/***************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/**  TX_THREAD *_tx_misra_void_to_thread_pointer_convert(VOID *pointer);                        */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m3/ac6/src/tx_thread_schedule.S b/ports/cortex_m3/ac6/src/tx_thread_schedule.S
index 7f43ef4e..2dd3922d 100644
--- a/ports/cortex_m3/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_m3/ac6/src/tx_thread_schedule.S
@@ -39,7 +39,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/AC6 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -66,13 +66,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -132,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV     r0, #0                              // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -182,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -233,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -256,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -268,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m3/gnu/inc/tx_port.h b/ports/cortex_m3/gnu/inc/tx_port.h
index af6bade2..54a16b80 100644
--- a/ports/cortex_m3/gnu/inc/tx_port.h
+++ b/ports/cortex_m3/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/GNU Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m3/gnu/src/tx_misra.S b/ports/cortex_m3/gnu/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m3/gnu/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR}
+ MOVS R4,R0
+ MOVS R0,R2
+ MOVS R2,R1
+ MOVS R1,R0
+ MOVS R0,R4
+ BL __aeabi_memset
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3
+ MULS R1,R2,R1
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5}
+ LDR R3,[R0, #+0]
+ LDR R4,[R1, #+0]
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ CMP R2,#+2
+ BCC.N _tx_misra_message_copy_0
+ SUBS R2,R2,#+1
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0]
+ STR R4,[R1, #+0]
+ POP {R4,R5}
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8
+ SUBS R2,R2,R0
+ RSBS R2,R2,#+0
+ ADD R0,R0,R2
+ STR R0,[R1, #+0]
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR}
+ MOVS R4,R0
+ MOVS R5,R1
+ BL _tx_thread_interrupt_disable
+ CMP R4,#+0
+ BEQ.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+0]
+ LDR R2,=0x54485244
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+8]
+ LDR R2,[R5, #+0]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1
+ LDR R1,[R4, #+8]
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12]
+ LDR R1,[R1, #+0]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R4, #+16]
+ LDR R1,[R1, #+1]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R5, #+0]
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0]
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145
+ BEQ.N _tx_misra_thread_stack_check_0
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore
+ POP {R0,R4,R5,PC} // return
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR}
+ LDR.N R4,DataTable2_1
+ LDR R4,[R4, #+0]
+ CMP R4,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_2
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28]
+ TST R5,R6
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_3
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4
+ LDR R6,[R6, #+0]
+ CMP R5,#+0
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44]
+ LDR R7,[R6, #+60]
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6
+ MOVS R6,#-1
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0]
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8]
+ LDR R0,[SP, #+32]
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16]
+ STR R2,[R4, #+20]
+ STR R3,[R4, #+24]
+ LDR R0,[SP, #+24]
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32
+ LDR.N R0,DataTable2_5
+ LDR R0,[R0, #+0]
+ CMP R4,R0
+ BCC.N _tx_misra_trace_event_insert_4
+ LDR.N R0,DataTable2_6
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/***********************************************************************************/
+/***********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************/
+/************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************/
+/************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************/
+/**************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************/
+/**************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************************/
+/****************************************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/****************************************************************************************************/
+/****************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************************/
+/**************************************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************************/
+/**************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************/
+/****************************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/****************************************************************************************/
+/****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return
+
+
+/*********************************************************************************/
+/*********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/*********************************************************************************/
+/*********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************************/
+/***************************************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/***************************************************************************************************/
+/***************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m3/gnu/src/tx_thread_schedule.S b/ports/cortex_m3/gnu/src/tx_thread_schedule.S
index e5b425fb..5c1f4767 100644
--- a/ports/cortex_m3/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_m3/gnu/src/tx_thread_schedule.S
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -72,6 +72,8 @@
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -131,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -181,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -232,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -255,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -267,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m3/iar/inc/tx_port.h b/ports/cortex_m3/iar/inc/tx_port.h
index 33b8a2bb..77c52fba 100644
--- a/ports/cortex_m3/iar/inc/tx_port.h
+++ b/ports/cortex_m3/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/IAR */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/IAR Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m3/iar/src/tx_thread_schedule.s b/ports/cortex_m3/iar/src/tx_thread_schedule.s
index 6584a90a..a40434c2 100644
--- a/ports/cortex_m3/iar/src/tx_thread_schedule.s
+++ b/ports/cortex_m3/iar/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/IAR */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m3/keil/inc/tx_port.h b/ports/cortex_m3/keil/inc/tx_port.h
index 369f912b..d12b1ad9 100644
--- a/ports/cortex_m3/keil/inc/tx_port.h
+++ b/ports/cortex_m3/keil/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/Keil */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/Keil Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/Keil Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m33/ac6/inc/tx_port.h b/ports/cortex_m33/ac6/inc/tx_port.h
index 26620c2c..aaa02995 100644
--- a/ports/cortex_m33/ac6/inc/tx_port.h
+++ b/ports/cortex_m33/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -68,6 +68,9 @@
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -371,9 +374,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -398,26 +401,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -599,7 +602,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports/cortex_m33/ac6/src/tx_thread_schedule.S b/ports/cortex_m33/ac6/src/tx_thread_schedule.S
index 9ca58e90..34ca023f 100644
--- a/ports/cortex_m33/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_m33/ac6/src/tx_thread_schedule.S
@@ -30,7 +30,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M33/AC6 */
-/* 6.1.6 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,6 +69,8 @@
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -128,12 +130,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV     r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -189,14 +201,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -250,7 +272,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -273,7 +300,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -284,7 +316,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m33/gnu/inc/tx_port.h b/ports/cortex_m33/gnu/inc/tx_port.h
index 26620c2c..aaa02995 100644
--- a/ports/cortex_m33/gnu/inc/tx_port.h
+++ b/ports/cortex_m33/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -68,6 +68,9 @@
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -371,9 +374,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -398,26 +401,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -599,7 +602,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports/cortex_m33/gnu/src/tx_thread_schedule.S b/ports/cortex_m33/gnu/src/tx_thread_schedule.S
index caf48c51..f3c382dd 100644
--- a/ports/cortex_m33/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_m33/gnu/src/tx_thread_schedule.S
@@ -26,7 +26,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M33/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,6 +67,8 @@
/* resulting in version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +128,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV     r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -187,14 +199,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -248,7 +270,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -271,7 +298,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -282,7 +314,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m33/iar/inc/tx_port.h b/ports/cortex_m33/iar/inc/tx_port.h
index 26620c2c..aaa02995 100644
--- a/ports/cortex_m33/iar/inc/tx_port.h
+++ b/ports/cortex_m33/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -68,6 +68,9 @@
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -371,9 +374,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -398,26 +401,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -599,7 +602,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports/cortex_m33/iar/src/tx_thread_schedule.s b/ports/cortex_m33/iar/src/tx_thread_schedule.s
index 003f8114..b7430415 100644
--- a/ports/cortex_m33/iar/src/tx_thread_schedule.s
+++ b/ports/cortex_m33/iar/src/tx_thread_schedule.s
@@ -43,7 +43,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M33/IAR */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -82,6 +82,8 @@
/* 06-02-2021 Scott Larson Added secure stack initialize */
/* in SVC handler, */
/* resulting in version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -128,12 +130,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV     r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -189,14 +201,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -250,7 +272,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -273,7 +300,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -284,7 +316,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m4/ac5/inc/tx_port.h b/ports/cortex_m4/ac5/inc/tx_port.h
index 0a08d098..7967a951 100644
--- a/ports/cortex_m4/ac5/inc/tx_port.h
+++ b/ports/cortex_m4/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/AC5 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m4/ac5/src/tx_thread_schedule.s b/ports/cortex_m4/ac5/src/tx_thread_schedule.s
index e497ee29..122460e2 100644
--- a/ports/cortex_m4/ac5/src/tx_thread_schedule.s
+++ b/ports/cortex_m4/ac5/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/AC5 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV     r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m4/ac6/inc/tx_port.h b/ports/cortex_m4/ac6/inc/tx_port.h
index 51947682..e0f408ab 100644
--- a/ports/cortex_m4/ac6/inc/tx_port.h
+++ b/ports/cortex_m4/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC6 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m4/ac6/src/tx_misra.S b/ports/cortex_m4/ac6/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m4/ac6/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR}
+ MOVS R4,R0
+ MOVS R0,R2
+ MOVS R2,R1
+ MOVS R1,R0
+ MOVS R0,R4
+ BL __aeabi_memset
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3
+ MULS R1,R2,R1
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5}
+ LDR R3,[R0, #+0]
+ LDR R4,[R1, #+0]
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ CMP R2,#+2
+ BCC.N _tx_misra_message_copy_0
+ SUBS R2,R2,#+1
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0]
+ STR R4,[R1, #+0]
+ POP {R4,R5}
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8
+ SUBS R2,R2,R0
+ RSBS R2,R2,#+0
+ ADD R0,R0,R2
+ STR R0,[R1, #+0]
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR}
+ MOVS R4,R0
+ MOVS R5,R1
+ BL _tx_thread_interrupt_disable
+ CMP R4,#+0
+ BEQ.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+0]
+ LDR R2,=0x54485244
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+8]
+ LDR R2,[R5, #+0]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1
+ LDR R1,[R4, #+8]
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12]
+ LDR R1,[R1, #+0]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R4, #+16]
+ LDR R1,[R1, #+1]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R5, #+0]
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0]
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145
+ BEQ.N _tx_misra_thread_stack_check_0
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore
+ POP {R0,R4,R5,PC} // return
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR}
+ LDR.N R4,DataTable2_1
+ LDR R4,[R4, #+0]
+ CMP R4,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_2
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28]
+ TST R5,R6
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_3
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4
+ LDR R6,[R6, #+0]
+ CMP R5,#+0
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44]
+ LDR R7,[R6, #+60]
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6
+ MOVS R6,#-1
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0]
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8]
+ LDR R0,[SP, #+32]
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16]
+ STR R2,[R4, #+20]
+ STR R3,[R4, #+24]
+ LDR R0,[SP, #+24]
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32
+ LDR.N R0,DataTable2_5
+ LDR R0,[R0, #+0]
+ CMP R4,R0
+ BCC.N _tx_misra_trace_event_insert_4
+ LDR.N R0,DataTable2_6
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/***********************************************************************************/
+/***********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************/
+/************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************/
+/************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************/
+/**************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************/
+/**************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************************/
+/****************************************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/****************************************************************************************************/
+/****************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************************/
+/**************************************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************************/
+/**************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************/
+/****************************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/****************************************************************************************/
+/****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return
+
+
+/*********************************************************************************/
+/*********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/*********************************************************************************/
+/*********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************************/
+/***************************************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/***************************************************************************************************/
+/***************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m4/ac6/src/tx_thread_schedule.S b/ports/cortex_m4/ac6/src/tx_thread_schedule.S
index d2e748af..9bae029f 100644
--- a/ports/cortex_m4/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_m4/ac6/src/tx_thread_schedule.S
@@ -39,7 +39,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/AC6 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -66,13 +66,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -132,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -182,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -233,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -256,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -268,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m4/gnu/inc/tx_port.h b/ports/cortex_m4/gnu/inc/tx_port.h
index 80237531..f1e25ab3 100644
--- a/ports/cortex_m4/gnu/inc/tx_port.h
+++ b/ports/cortex_m4/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/GNU Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m4/gnu/src/tx_misra.S b/ports/cortex_m4/gnu/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m4/gnu/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR}
+ MOVS R4,R0
+ MOVS R0,R2
+ MOVS R2,R1
+ MOVS R1,R0
+ MOVS R0,R4
+ BL __aeabi_memset
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3
+ MULS R1,R2,R1
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5}
+ LDR R3,[R0, #+0]
+ LDR R4,[R1, #+0]
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ CMP R2,#+2
+ BCC.N _tx_misra_message_copy_0
+ SUBS R2,R2,#+1
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0]
+ STR R4,[R1, #+0]
+ POP {R4,R5}
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8
+ SUBS R2,R2,R0
+ RSBS R2,R2,#+0
+ ADD R0,R0,R2
+ STR R0,[R1, #+0]
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR}
+ MOVS R4,R0
+ MOVS R5,R1
+ BL _tx_thread_interrupt_disable
+ CMP R4,#+0
+ BEQ.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+0]
+ LDR R2,=0x54485244
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+8]
+ LDR R2,[R5, #+0]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1
+ LDR R1,[R4, #+8]
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12]
+ LDR R1,[R1, #+0]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R4, #+16]
+ LDR R1,[R1, #+1]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R5, #+0]
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0]
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145
+ BEQ.N _tx_misra_thread_stack_check_0
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore
+ POP {R0,R4,R5,PC} // return
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR}
+ LDR.N R4,DataTable2_1
+ LDR R4,[R4, #+0]
+ CMP R4,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_2
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28]
+ TST R5,R6
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_3
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4
+ LDR R6,[R6, #+0]
+ CMP R5,#+0
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44]
+ LDR R7,[R6, #+60]
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6
+ MOVS R6,#-1
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0]
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8]
+ LDR R0,[SP, #+32]
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16]
+ STR R2,[R4, #+20]
+ STR R3,[R4, #+24]
+ LDR R0,[SP, #+24]
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32
+ LDR.N R0,DataTable2_5
+ LDR R0,[R0, #+0]
+ CMP R4,R0
+ BCC.N _tx_misra_trace_event_insert_4
+ LDR.N R0,DataTable2_6
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/***********************************************************************************/
+/***********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************/
+/************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************/
+/************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************/
+/**************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************/
+/**************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************************/
+/****************************************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/****************************************************************************************************/
+/****************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************************/
+/**************************************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************************/
+/**************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************/
+/****************************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/****************************************************************************************/
+/****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return
+
+
+/*********************************************************************************/
+/*********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/*********************************************************************************/
+/*********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************************/
+/***************************************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/***************************************************************************************************/
+/***************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m4/gnu/src/tx_thread_schedule.S b/ports/cortex_m4/gnu/src/tx_thread_schedule.S
index 63e5c01f..77e63398 100644
--- a/ports/cortex_m4/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_m4/gnu/src/tx_thread_schedule.S
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -72,6 +72,8 @@
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -131,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -181,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -232,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -255,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -267,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m4/iar/inc/tx_port.h b/ports/cortex_m4/iar/inc/tx_port.h
index 67b7fa60..b27fa00f 100644
--- a/ports/cortex_m4/iar/inc/tx_port.h
+++ b/ports/cortex_m4/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/IAR */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/IAR Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m4/iar/src/tx_thread_schedule.s b/ports/cortex_m4/iar/src/tx_thread_schedule.s
index 927fb3d5..2d1b0298 100644
--- a/ports/cortex_m4/iar/src/tx_thread_schedule.s
+++ b/ports/cortex_m4/iar/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/IAR */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m4/keil/inc/tx_port.h b/ports/cortex_m4/keil/inc/tx_port.h
index 8c6f8c8c..199c876a 100644
--- a/ports/cortex_m4/keil/inc/tx_port.h
+++ b/ports/cortex_m4/keil/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/Keil */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/Keil Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/Keil Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m7/ac5/inc/tx_port.h b/ports/cortex_m7/ac5/inc/tx_port.h
index 391fd743..a0e3d194 100644
--- a/ports/cortex_m7/ac5/inc/tx_port.h
+++ b/ports/cortex_m7/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/AC5 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m7/ac5/src/tx_thread_schedule.s b/ports/cortex_m7/ac5/src/tx_thread_schedule.s
index a09eadd2..7c9dc566 100644
--- a/ports/cortex_m7/ac5/src/tx_thread_schedule.s
+++ b/ports/cortex_m7/ac5/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/AC5 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m7/ac6/inc/tx_port.h b/ports/cortex_m7/ac6/inc/tx_port.h
index 904a12b3..cfd833fa 100644
--- a/ports/cortex_m7/ac6/inc/tx_port.h
+++ b/ports/cortex_m7/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC6 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m7/ac6/src/tx_misra.S b/ports/cortex_m7/ac6/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m7/ac6/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR}
+ MOVS R4,R0
+ MOVS R0,R2
+ MOVS R2,R1
+ MOVS R1,R0
+ MOVS R0,R4
+ BL __aeabi_memset
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3
+ MULS R1,R2,R1
+ ADD R0,R0,R1
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5}
+ LDR R3,[R0, #+0]
+ LDR R4,[R1, #+0]
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ CMP R2,#+2
+ BCC.N _tx_misra_message_copy_0
+ SUBS R2,R2,#+1
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0]
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0]
+ STR R4,[R1, #+0]
+ POP {R4,R5}
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1
+ ASRS R0,R0,#+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8
+ SUBS R2,R2,R0
+ RSBS R2,R2,#+0
+ ADD R0,R0,R2
+ STR R0,[R1, #+0]
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR}
+ MOVS R4,R0
+ MOVS R5,R1
+ BL _tx_thread_interrupt_disable
+ CMP R4,#+0
+ BEQ.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+0]
+ LDR R2,=0x54485244
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0
+ LDR R1,[R4, #+8]
+ LDR R2,[R5, #+0]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1
+ LDR R1,[R4, #+8]
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12]
+ LDR R1,[R1, #+0]
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R4, #+16]
+ LDR R1,[R1, #+1] // NOTE(review): unaligned offset — parallel stack-start check uses #+0; works only because fill bytes are all 0xEF, confirm vs. C source
+ CMP R1,#-269488145
+ BNE.N _tx_misra_thread_stack_check_2
+ LDR R1,[R5, #+0]
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0]
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145
+ BEQ.N _tx_misra_thread_stack_check_0
+ BL _tx_thread_interrupt_restore
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore
+ POP {R0,R4,R5,PC} // return
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR}
+ LDR.N R4,DataTable2_1
+ LDR R4,[R4, #+0]
+ CMP R4,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_2
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28]
+ TST R5,R6
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R5,DataTable2_3
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4
+ LDR R6,[R6, #+0]
+ CMP R5,#+0
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44]
+ LDR R7,[R6, #+60]
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6
+ MOVS R6,#-1
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0]
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8]
+ LDR R0,[SP, #+32]
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16]
+ STR R2,[R4, #+20]
+ STR R3,[R4, #+24]
+ LDR R0,[SP, #+24]
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32
+ LDR.N R0,DataTable2_5
+ LDR R0,[R0, #+0]
+ CMP R4,R0
+ BCC.N _tx_misra_trace_event_insert_4
+ LDR.N R0,DataTable2_6
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************/
+/***********************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/***********************************************************************************/
+/***********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************/
+/************************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************/
+/************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************/
+/**************************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************/
+/**************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************************/
+/*****************************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/*****************************************************************************************/
+/*****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************************/
+/****************************************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/****************************************************************************************************/
+/****************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************************************/
+/**************************************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************************************/
+/**************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return
+
+
+/*****************************************************************************/
+/*****************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/*****************************************************************************/
+/*****************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0
+ BX LR // return
+
+
+/********************************************************************************/
+/********************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/********************************************************************************/
+/********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return
+
+
+/****************************************************************************************/
+/****************************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/****************************************************************************************/
+/****************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return
+
+
+/*********************************************************************************/
+/*********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/*********************************************************************************/
+/*********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************************/
+/***************************************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/***************************************************************************************************/
+/***************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m7/ac6/src/tx_thread_schedule.S b/ports/cortex_m7/ac6/src/tx_thread_schedule.S
index b8ca4f1b..e4a34ac8 100644
--- a/ports/cortex_m7/ac6/src/tx_thread_schedule.S
+++ b/ports/cortex_m7/ac6/src/tx_thread_schedule.S
@@ -39,7 +39,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/AC6 */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -66,13 +66,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -132,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -182,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -233,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -256,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -268,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m7/gnu/inc/tx_port.h b/ports/cortex_m7/gnu/inc/tx_port.h
index 1c351bb8..9a88b1d8 100644
--- a/ports/cortex_m7/gnu/inc/tx_port.h
+++ b/ports/cortex_m7/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/GNU Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m7/gnu/src/tx_misra.S b/ports/cortex_m7/gnu/src/tx_misra.S
new file mode 100644
index 00000000..b03fdcd0
--- /dev/null
+++ b/ports/cortex_m7/gnu/src/tx_misra.S
@@ -0,0 +1,1033 @@
+/**************************************************************************/
+/* */
+/* Copyright (c) Microsoft Corporation. All rights reserved. */
+/* */
+/* This software is licensed under the Microsoft Software License */
+/* Terms for Microsoft Azure RTOS. Full text of the license can be */
+/* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
+/* and in the root directory of this software. */
+/* */
+/**************************************************************************/
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ThreadX Component */
+/** */
+/** ThreadX MISRA Compliance */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ #define SHT_PROGBITS 0x1
+
+ .global __aeabi_memset
+ .global _tx_thread_current_ptr
+ .global _tx_thread_interrupt_disable
+ .global _tx_thread_interrupt_restore
+ .global _tx_thread_stack_analyze
+ .global _tx_thread_stack_error_handler
+ .global _tx_thread_system_state
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_trace_buffer_current_ptr
+ .global _tx_trace_buffer_end_ptr
+ .global _tx_trace_buffer_start_ptr
+ .global _tx_trace_event_enable_bits
+ .global _tx_trace_full_notify_function
+ .global _tx_trace_header_ptr
+#endif
+
+ .global _tx_misra_always_true
+ .global _tx_misra_block_pool_to_uchar_pointer_convert
+ .global _tx_misra_byte_pool_to_uchar_pointer_convert
+ .global _tx_misra_char_to_uchar_pointer_convert
+ .global _tx_misra_const_char_to_char_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_entry_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_indirect_void_to_uchar_pointer_convert
+ .global _tx_misra_memset
+ .global _tx_misra_message_copy
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_object_to_uchar_pointer_convert
+#endif
+ .global _tx_misra_pointer_to_ulong_convert
+ .global _tx_misra_status_get
+ .global _tx_misra_thread_stack_check
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_time_stamp_get
+#endif
+ .global _tx_misra_timer_indirect_to_void_pointer_convert
+ .global _tx_misra_timer_pointer_add
+ .global _tx_misra_timer_pointer_dif
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_trace_event_insert
+#endif
+ .global _tx_misra_uchar_pointer_add
+ .global _tx_misra_uchar_pointer_dif
+ .global _tx_misra_uchar_pointer_sub
+ .global _tx_misra_uchar_to_align_type_pointer_convert
+ .global _tx_misra_uchar_to_block_pool_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_entry_pointer_convert
+ .global _tx_misra_uchar_to_header_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_indirect_byte_pool_pointer_convert
+ .global _tx_misra_uchar_to_indirect_uchar_pointer_convert
+#ifdef TX_ENABLE_EVENT_TRACE
+ .global _tx_misra_uchar_to_object_pointer_convert
+#endif
+ .global _tx_misra_uchar_to_void_pointer_convert
+ .global _tx_misra_ulong_pointer_add
+ .global _tx_misra_ulong_pointer_dif
+ .global _tx_misra_ulong_pointer_sub
+ .global _tx_misra_ulong_to_pointer_convert
+ .global _tx_misra_ulong_to_thread_pointer_convert
+ .global _tx_misra_user_timer_pointer_get
+ .global _tx_misra_void_to_block_pool_pointer_convert
+ .global _tx_misra_void_to_byte_pool_pointer_convert
+ .global _tx_misra_void_to_event_flags_pointer_convert
+ .global _tx_misra_void_to_indirect_uchar_pointer_convert
+ .global _tx_misra_void_to_mutex_pointer_convert
+ .global _tx_misra_void_to_queue_pointer_convert
+ .global _tx_misra_void_to_semaphore_pointer_convert
+ .global _tx_misra_void_to_thread_pointer_convert
+ .global _tx_misra_void_to_uchar_pointer_convert
+ .global _tx_misra_void_to_ulong_pointer_convert
+ .global _tx_misra_ipsr_get
+ .global _tx_misra_control_get
+ .global _tx_misra_control_set
+#ifdef __ARM_FP
+ .global _tx_misra_fpccr_get
+ .global _tx_misra_vfp_touch
+#endif
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_memset(VOID *ptr, UINT value, UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .align 4
+ .syntax unified
+ .thumb_func
+_tx_misra_memset:
+ PUSH {R4,LR} // Save R4 and return address
+ MOVS R4,R0 // R4 = ptr (saved across the shuffle)
+ MOVS R0,R2 // R0 = size
+ MOVS R2,R1 // R2 = value
+ MOVS R1,R0 // R1 = size
+ MOVS R0,R4 // R0 = ptr; args now (dest, size, value) per AEABI
+ BL __aeabi_memset // __aeabi_memset(dest, n, c)
+ POP {R4,PC} // return
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_add(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_add:
+ ADD R0,R0,R1 // R0 = ptr + amount (byte arithmetic)
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_uchar_pointer_sub(UCHAR *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_sub:
+ RSBS R1,R1,#+0 // R1 = -amount
+ ADD R0,R0,R1 // R0 = ptr - amount
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_pointer_dif:
+ SUBS R0,R0,R1 // R0 = ptr1 - ptr2 (byte count)
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_pointer_to_ulong_convert(VOID *ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_pointer_to_ulong_convert:
+ BX LR // return value unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_add(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_add:
+ ADD R0,R0,R1, LSL #+2 // R0 = ptr + (amount * sizeof(ULONG))
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_sub:
+ MVNS R2,#+3 // R2 = ~3 = -4
+ MULS R1,R2,R1 // R1 = amount * -4
+ ADD R0,R0,R1 // R0 = ptr - (amount * sizeof(ULONG))
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_pointer_dif:
+ SUBS R0,R0,R1 // R0 = byte difference
+ ASRS R0,R0,#+2 // Convert bytes to ULONG element count
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_ulong_to_pointer_convert(ULONG input); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_pointer_convert:
+ BX LR // return value unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_message_copy(ULONG **source, ULONG **destination, */
+/** UINT size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_message_copy:
+ PUSH {R4,R5} // Save scratch registers
+ LDR R3,[R0, #+0] // R3 = *source (read pointer)
+ LDR R4,[R1, #+0] // R4 = *destination (write pointer)
+ LDR R5,[R3, #+0] // Copy first ULONG
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4 // Advance both pointers one word
+ ADDS R3,R3,#+4
+ CMP R2,#+2 // Only one word requested?
+ BCC.N _tx_misra_message_copy_0 // Yes, done copying
+ SUBS R2,R2,#+1 // Words remaining after the first
+ B.N _tx_misra_message_copy_1
+_tx_misra_message_copy_2:
+ LDR R5,[R3, #+0] // Copy next ULONG
+ STR R5,[R4, #+0]
+ ADDS R4,R4,#+4
+ ADDS R3,R3,#+4
+ SUBS R2,R2,#+1
+_tx_misra_message_copy_1:
+ CMP R2,#+0 // More words to copy?
+ BNE.N _tx_misra_message_copy_2
+_tx_misra_message_copy_0:
+ STR R3,[R0, #+0] // Write updated pointers back through caller's
+ STR R4,[R1, #+0] // source/destination double pointers
+ POP {R4,R5} // Restore scratch registers
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_timer_pointer_dif(TX_TIMER_INTERNAL **ptr1, */
+/** TX_TIMER_INTERNAL **ptr2); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_dif:
+ SUBS R0,R0,R1 // R0 = byte difference
+ ASRS R0,R0,#+2 // Convert bytes to pointer-element count
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_TIMER_INTERNAL **_tx_misra_timer_pointer_add(TX_TIMER_INTERNAL */
+/** **ptr1, ULONG size); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_pointer_add:
+ ADD R0,R0,R1, LSL #+2 // R0 = ptr1 + (size * sizeof(pointer))
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_user_timer_pointer_get(TX_TIMER_INTERNAL */
+/** *internal_timer, TX_TIMER **user_timer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_user_timer_pointer_get:
+ ADDS R2,R0,#+8 // R2 = internal_timer + 8 (offset of internal member in TX_TIMER)
+ SUBS R2,R2,R0 // R2 = member offset (8)
+ RSBS R2,R2,#+0 // R2 = -offset
+ ADD R0,R0,R2 // R0 = enclosing TX_TIMER (back up by member offset)
+ STR R0,[R1, #+0] // *user_timer = result
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_thread_stack_check(TX_THREAD *thread_ptr, */
+/** VOID **highest_stack); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_thread_stack_check:
+ PUSH {R3-R5,LR} // Save registers (R3 pushed for 8-byte stack alignment)
+ MOVS R4,R0 // R4 = thread_ptr
+ MOVS R5,R1 // R5 = highest_stack
+ BL _tx_thread_interrupt_disable // Protect the check from interrupts
+ CMP R4,#+0 // NULL thread pointer?
+ BEQ.N _tx_misra_thread_stack_check_0 // Yes, nothing to check
+ LDR R1,[R4, #+0] // Pickup thread ID field
+ LDR R2,=0x54485244 // Valid thread ID ("THRD" in ASCII)
+ CMP R1,R2
+ BNE.N _tx_misra_thread_stack_check_0 // Not a valid thread, skip checks
+ LDR R1,[R4, #+8] // Pickup thread's current stack pointer
+ LDR R2,[R5, #+0] // Pickup recorded highest (deepest) stack point
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_1 // Not deeper than recorded point
+ LDR R1,[R4, #+8] // New deepest point: update *highest_stack
+ STR R1,[R5, #+0]
+_tx_misra_thread_stack_check_1:
+ LDR R1,[R4, #+12] // Pickup stack start pointer
+ LDR R1,[R1, #+0] // Read word at stack start
+ CMP R1,#-269488145 // Still 0xEFEFEFEF fill pattern?
+ BNE.N _tx_misra_thread_stack_check_2 // No, stack overwritten -- error
+ LDR R1,[R4, #+16] // Pickup stack end pointer
+ LDR R1,[R1, #+1] // Read word at stack end
+ CMP R1,#-269488145 // Still 0xEFEFEFEF fill pattern?
+ BNE.N _tx_misra_thread_stack_check_2 // No, stack overwritten -- error
+ LDR R1,[R5, #+0] // Deepest point within stack bounds?
+ LDR R2,[R4, #+12]
+ CMP R1,R2
+ BCS.N _tx_misra_thread_stack_check_3 // Yes, bounds OK
+_tx_misra_thread_stack_check_2:
+ BL _tx_thread_interrupt_restore // Error path: restore interrupts,
+ MOVS R0,R4
+ BL _tx_thread_stack_error_handler // report the stack error,
+ BL _tx_thread_interrupt_disable // then re-disable for remaining checks
+_tx_misra_thread_stack_check_3:
+ LDR R1,[R5, #+0] // Word just below deepest point
+ LDR R1,[R1, #-4]
+ CMP R1,#-269488145 // Still fill pattern (stack has not grown)?
+ BEQ.N _tx_misra_thread_stack_check_0 // Yes, no re-analysis needed
+ BL _tx_thread_interrupt_restore // Stack grew: re-analyze usage
+ MOVS R0,R4
+ BL _tx_thread_stack_analyze
+ BL _tx_thread_interrupt_disable
+_tx_misra_thread_stack_check_0:
+ BL _tx_thread_interrupt_restore // Restore interrupts
+ POP {R0,R4,R5,PC} // return (R0 pop balances the R3 push)
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID _tx_misra_trace_event_insert(ULONG event_id, */
+/** VOID *info_field_1, ULONG info_field_2, ULONG info_field_3, */
+/** ULONG info_field_4, ULONG filter, ULONG time_stamp); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_trace_event_insert:
+ PUSH {R3-R7,LR} // Save registers (R3 for alignment)
+ LDR.N R4,DataTable2_1 // R4 = &_tx_trace_buffer_current_ptr
+ LDR R4,[R4, #+0] // R4 = current trace entry
+ CMP R4,#+0 // Trace buffer active?
+ BEQ.N _tx_misra_trace_event_insert_0 // No, nothing to do
+ LDR.N R5,DataTable2_2 // R5 = &_tx_trace_event_enable_bits
+ LDR R5,[R5, #+0]
+ LDR R6,[SP, #+28] // R6 = filter (stack argument)
+ TST R5,R6 // Event class enabled?
+ BEQ.N _tx_misra_trace_event_insert_0 // No, discard event
+ LDR.N R5,DataTable2_3 // R5 = &_tx_thread_system_state
+ LDR R5,[R5, #+0]
+ LDR.N R6,DataTable2_4 // R6 = &_tx_thread_current_ptr
+ LDR R6,[R6, #+0]
+ CMP R5,#+0 // In thread context (system state 0)?
+ BNE.N _tx_misra_trace_event_insert_1
+ LDR R5,[R6, #+44] // Build thread info word from priority and
+ LDR R7,[R6, #+60] // preemption-threshold fields of current thread
+ LSLS R7,R7,#+16
+ ORRS R7,R7,#0x80000000
+ ORRS R5,R7,R5
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_1:
+ CMP R5,#-252645136 // System state >= 0xF0F0F0F0 (initialization)?
+ BCS.N _tx_misra_trace_event_insert_3
+ MOVS R5,R6 // Interrupt context: record current thread pointer
+ MOVS R6,#-1 // and 0xFFFFFFFF as the context marker
+ B.N _tx_misra_trace_event_insert_2
+_tx_misra_trace_event_insert_3:
+ MOVS R6,#-252645136 // Initialization context marker (0xF0F0F0F0)
+ MOVS R5,#+0
+_tx_misra_trace_event_insert_2:
+ STR R6,[R4, #+0] // Fill in trace entry fields
+ STR R5,[R4, #+4]
+ STR R0,[R4, #+8] // event_id
+ LDR R0,[SP, #+32] // time_stamp (stack argument)
+ STR R0,[R4, #+12]
+ STR R1,[R4, #+16] // info_field_1
+ STR R2,[R4, #+20] // info_field_2
+ STR R3,[R4, #+24] // info_field_3
+ LDR R0,[SP, #+24] // info_field_4 (stack argument)
+ STR R0,[R4, #+28]
+ ADDS R4,R4,#+32 // Advance to next 32-byte entry
+ LDR.N R0,DataTable2_5 // R0 = &_tx_trace_buffer_end_ptr
+ LDR R0,[R0, #+0]
+ CMP R4,R0 // Past end of buffer?
+ BCC.N _tx_misra_trace_event_insert_4 // No, just store new current pointer
+ LDR.N R0,DataTable2_6 // Yes, wrap to buffer start
+ LDR R4,[R0, #+0]
+ LDR.N R0,DataTable2_1
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7 // Update trace header's current-entry field
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+ LDR.N R0,DataTable2_8 // Buffer-full notify callback registered?
+ LDR R0,[R0, #+0]
+ CMP R0,#+0
+ BEQ.N _tx_misra_trace_event_insert_0 // No, done
+ LDR.N R0,DataTable2_7 // Yes, call it with the trace header pointer
+ LDR R0,[R0, #+0]
+ LDR.N R1,DataTable2_8
+ LDR R1,[R1, #+0]
+ BLX R1
+ B.N _tx_misra_trace_event_insert_0
+_tx_misra_trace_event_insert_4:
+ LDR.N R0,DataTable2_1 // Store advanced current pointer
+ STR R4,[R0, #+0]
+ LDR.N R0,DataTable2_7 // Mirror it into the trace header
+ LDR R0,[R0, #+0]
+ STR R4,[R0, #+32]
+_tx_misra_trace_event_insert_0:
+ POP {R0,R4-R7,PC} // return
+
+
+ .data
+DataTable2_1:
+ .word _tx_trace_buffer_current_ptr
+
+ .data
+DataTable2_2:
+ .word _tx_trace_event_enable_bits
+
+ .data
+DataTable2_5:
+ .word _tx_trace_buffer_end_ptr
+
+ .data
+DataTable2_6:
+ .word _tx_trace_buffer_start_ptr
+
+ .data
+DataTable2_7:
+ .word _tx_trace_header_ptr
+
+ .data
+DataTable2_8:
+ .word _tx_trace_full_notify_function
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG _tx_misra_time_stamp_get(VOID); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_time_stamp_get:
+ MOVS R0,#+0 // No time source configured here; return 0
+ BX LR // return
+
+#endif
+
+ .data
+DataTable2_3:
+ .word _tx_thread_system_state
+
+ .data
+DataTable2_4:
+ .word _tx_thread_current_ptr
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_always_true(void); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_always_true:
+ MOVS R0,#+1 // Always return 1 (true)
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR **_tx_misra_indirect_void_to_uchar_pointer_convert(VOID **return_ptr); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_indirect_void_to_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR **_tx_misra_uchar_to_indirect_uchar_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_block_pool_to_uchar_pointer_convert(TX_BLOCK_POOL *pool); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_block_pool_to_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_void_to_block_pool_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_block_pool_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_void_to_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_BLOCK_POOL *_tx_misra_uchar_to_block_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_block_pool_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR **_tx_misra_void_to_indirect_uchar_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_indirect_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_BYTE_POOL *_tx_misra_void_to_byte_pool_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_byte_pool_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UCHAR *_tx_misra_byte_pool_to_uchar_pointer_convert(TX_BYTE_POOL *pool); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_byte_pool_to_uchar_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ALIGN_TYPE *_tx_misra_uchar_to_align_type_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_align_type_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_BYTE_POOL **_tx_misra_uchar_to_indirect_byte_pool_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_indirect_byte_pool_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_EVENT_FLAGS_GROUP *_tx_misra_void_to_event_flags_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_event_flags_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** ULONG *_tx_misra_void_to_ulong_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_ulong_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_MUTEX *_tx_misra_void_to_mutex_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_mutex_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** UINT _tx_misra_status_get(UINT status); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_status_get:
+ MOVS R0,#+0 // Always return 0 (TX_SUCCESS), ignoring input status
+ BX LR // return
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_QUEUE *_tx_misra_void_to_queue_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_queue_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_SEMAPHORE *_tx_misra_void_to_semaphore_pointer_convert(VOID *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_semaphore_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_uchar_to_void_pointer_convert(UCHAR *pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_void_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_ulong_to_thread_pointer_convert(ULONG value); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ulong_to_thread_pointer_convert:
+ BX LR // return value unchanged (type-only conversion)
+
+
+/**************************************************************************/
+/**************************************************************************/
+/** */
+/** VOID *_tx_misra_timer_indirect_to_void_pointer_convert(TX_TIMER_INTERNAL **pointer); */
+/** */
+/**************************************************************************/
+/**************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_timer_indirect_to_void_pointer_convert:
+ BX LR // return pointer unchanged (type-only conversion)
+
+
+/***************************************************************************************/
+/***************************************************************************************/
+/** */
+/** CHAR *_tx_misra_const_char_to_char_pointer_convert(const char *pointer); */
+/** */
+/***************************************************************************************/
+/***************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_const_char_to_char_pointer_convert:
+ BX LR // return
+
+
+/**********************************************************************************/
+/**********************************************************************************/
+/** */
+/** TX_THREAD *_tx_misra_void_to_thread_pointer_convert(void *pointer); */
+/** */
+/**********************************************************************************/
+/**********************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_void_to_thread_pointer_convert:
+ BX LR // return
+
+
+#ifdef TX_ENABLE_EVENT_TRACE
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_object_to_uchar_pointer_convert(TX_TRACE_OBJECT_ENTRY *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_object_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/************************************************************************************************/
+/************************************************************************************************/
+/** */
+/** TX_TRACE_OBJECT_ENTRY *_tx_misra_uchar_to_object_pointer_convert(UCHAR *pointer); */
+/** */
+/************************************************************************************************/
+/************************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_object_pointer_convert:
+ BX LR // return
+
+
+/******************************************************************************************/
+/******************************************************************************************/
+/** */
+/** TX_TRACE_HEADER *_tx_misra_uchar_to_header_pointer_convert(UCHAR *pointer); */
+/** */
+/******************************************************************************************/
+/******************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_header_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** TX_TRACE_BUFFER_ENTRY *_tx_misra_uchar_to_entry_pointer_convert(UCHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_uchar_to_entry_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_entry_to_uchar_pointer_convert:
+ BX LR // return
+#endif
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** UCHAR *_tx_misra_char_to_uchar_pointer_convert(CHAR *pointer); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_char_to_uchar_pointer_convert:
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_ipsr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_ipsr_get:
+ MRS R0, IPSR
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_control_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_get:
+ MRS R0, CONTROL
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_control_set(ULONG value); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_control_set:
+ MSR CONTROL, R0
+ BX LR // return
+
+
+#ifdef __ARM_FP
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** ULONG _tx_misra_fpccr_get(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_fpccr_get:
+ LDR r0, =0xE000EF34 // Build FPCCR address
+ LDR r0, [r0] // Load FPCCR value
+ BX LR // return
+
+
+/***********************************************************************************************/
+/***********************************************************************************************/
+/** */
+/** void _tx_misra_vfp_touch(void); */
+/** */
+/***********************************************************************************************/
+/***********************************************************************************************/
+
+ .text
+ .thumb_func
+_tx_misra_vfp_touch:
+ vmov.f32 s0, s0
+ BX LR // return
+
+#endif
+
+
+ .data
+ .word 0
diff --git a/ports/cortex_m7/gnu/src/tx_thread_schedule.S b/ports/cortex_m7/gnu/src/tx_thread_schedule.S
index f75bff29..12343ad4 100644
--- a/ports/cortex_m7/gnu/src/tx_thread_schedule.S
+++ b/ports/cortex_m7/gnu/src/tx_thread_schedule.S
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -72,6 +72,8 @@
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -131,12 +133,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -181,14 +193,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -232,7 +254,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -255,7 +282,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -267,8 +299,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/cortex_m7/iar/inc/tx_port.h b/ports/cortex_m7/iar/inc/tx_port.h
index f5a144cd..f11ff9a9 100644
--- a/ports/cortex_m7/iar/inc/tx_port.h
+++ b/ports/cortex_m7/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/IAR */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -56,6 +56,9 @@
/* violation, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -154,14 +157,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -378,7 +381,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -582,7 +585,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_interrupt_posture();
@@ -651,7 +654,7 @@ static void _tx_thread_system_return_inline(void)
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
#ifdef TX_PORT_USE_BASEPRI
@@ -704,7 +707,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/IAR Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports/cortex_m7/iar/src/tx_thread_schedule.s b/ports/cortex_m7/iar/src/tx_thread_schedule.s
index 65893cb8..954c36fc 100644
--- a/ports/cortex_m7/iar/src/tx_thread_schedule.s
+++ b/ports/cortex_m7/iar/src/tx_thread_schedule.s
@@ -37,7 +37,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/IAR */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -64,13 +64,14 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 Scott Larson Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -126,12 +127,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+    MOV r0, #0                                  // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -176,14 +187,24 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBZ r1, __tx_ts_wait // No, skip to the wait processing
/* Yes, another thread is ready for else, make the current thread the new thread. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -227,7 +248,12 @@ _skip_vfp_restore:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
STR r1, [r0] // Store it in the current pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
@@ -250,7 +276,12 @@ __tx_ts_wait:
POP {r0-r3}
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -262,8 +293,12 @@ __tx_ts_ready:
STR r7, [r8, #0xD04] // Clear any PendSV
/* Re-enable interrupts and restore new thread. */
-
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_restore // Restore the thread
// }
diff --git a/ports/linux/gnu/inc/tx_port.h b/ports/linux/gnu/inc/tx_port.h
index 2202dbda..024b12bd 100644
--- a/ports/linux/gnu/inc/tx_port.h
+++ b/ports/linux/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Linux/GNU */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -51,6 +51,9 @@
/* 10-15-2021 William E. Lamie Modified comment(s), added */
/* symbol ULONG64_DEFINED, */
/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comment(s), removed */
+/* useless definition, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -566,8 +569,6 @@ void _tx_linux_thread_init();
#define TX_LINUX_MEMORY_SIZE 64000
#endif
-#define TX_TIMER_TICKS_PER_SECOND 100UL
-
/* Define priorities of pthreads. */
#define TX_LINUX_PRIORITY_SCHEDULE (3)
diff --git a/ports/xtensa/xcc/src/tx_initialize_low_level.c b/ports/xtensa/xcc/src/tx_initialize_low_level.c
index d7082db0..eebae217 100644
--- a/ports/xtensa/xcc/src/tx_initialize_low_level.c
+++ b/ports/xtensa/xcc/src/tx_initialize_low_level.c
@@ -52,7 +52,10 @@ int32_t xt_timer_intnum = -1;
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
+/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
+/* 04-25-2022 Scott Larson Modified comments and updated */
+/* function names, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_initialize_low_level(VOID)
@@ -154,14 +157,14 @@ VOID _tx_initialize_low_level(VOID)
/* Compute tick divisor if clock freq is not compile-time constant. */
#ifndef XT_CLOCK_FREQ
- _xt_tick_divisor_init();
+ xt_tick_divisor_init();
#endif
/* Set up the periodic tick timer (assume enough time to complete init). */
#ifdef XT_CLOCK_FREQ
XT_WSR_CCOMPARE(XT_RSR_CCOUNT() + XT_TICK_DIVISOR);
#else
- XT_WSR_CCOMPARE(XT_RSR_CCOUNT() + _xt_tick_divisor);
+ XT_WSR_CCOMPARE(XT_RSR_CCOUNT() + xt_tick_divisor);
#endif
#if XCHAL_HAVE_XEA3
diff --git a/ports/xtensa/xcc/src/tx_timer_interrupt.S b/ports/xtensa/xcc/src/tx_timer_interrupt.S
index 6fcc5d36..2953f996 100644
--- a/ports/xtensa/xcc/src/tx_timer_interrupt.S
+++ b/ports/xtensa/xcc/src/tx_timer_interrupt.S
@@ -64,7 +64,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
+/* 12-31-2020 Cadence Design Systems Initial Version 6.1.3 */
+/* 04-25-2022 Scott Larson Modified comments and updated */
+/* function name, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -128,7 +131,7 @@ _tx_timer_interrupt:
#ifdef XT_CLOCK_FREQ
movi a2, XT_TICK_DIVISOR /* a2 = comparator increment */
#else
- movi a3, _xt_tick_divisor
+ movi a3, xt_tick_divisor
l32i a2, a3, 0 /* a2 = comparator increment */
#endif
rsr a3, XT_CCOMPARE /* a3 = old comparator value */
diff --git a/ports/xtensa/xcc/src/xtensa_coproc_handler.S b/ports/xtensa/xcc/src/xtensa_coproc_handler.S
index ee55e471..33de2070 100644
--- a/ports/xtensa/xcc/src/xtensa_coproc_handler.S
+++ b/ports/xtensa/xcc/src/xtensa_coproc_handler.S
@@ -461,8 +461,10 @@ _xt_coproc_restorecs:
//-----------------------------------------------------------------------------
// For XEA3, coprocessor exceptions come here. This is a wrapper function that
-// calls _xt_coproc_handler() to do the actual work. Since the handler can be
-// interrupted make sure that no context switch occurs.
+// calls _xt_coproc_handler() to do the actual work. We don't want the handler
+// to be interrupted because that might cause a round-robin switch and leave
+// coprocessor context in a confused state. So interrupts are disabled before
+// calling the handler. They will be re-enabled on return from exception.
//-----------------------------------------------------------------------------
.text
@@ -474,9 +476,9 @@ _xt_coproc_exc:
#ifdef __XTENSA_CALL0_ABI__
addi a1, a1, -16 // reserve 16 bytes on stack
s32i a0, a1, 0 // save return address
- s32i a2, a1, 4 // save a2
s32i a15, a1, 8 // must save a15 (see dispatch)
- l32i a2, a1, 4
+ movi a3, PS_DI_MASK
+ xps a3, a3 // Set PS.DI, disable interrupts
l32i a3, a2, XT_STK_EXCCAUSE // a3 <- exccause
extui a2, a3, 8, 4 // a2 <- CP index
call0 _xt_coproc_handler
@@ -487,6 +489,8 @@ _xt_coproc_exc:
#else
entry a1, 48 // reserve 16 bytes on stack
s32i a0, a1, 0 // save return address
+ movi a3, PS_DI_MASK
+ xps a3, a3 // Set PS.DI, disable interrupts
l32i a3, a2, XT_STK_EXCCAUSE // a3 <- exccause
extui a2, a3, 8, 4 // a2 <- CP index
call0 _xt_coproc_handler
diff --git a/ports/xtensa/xcc/src/xtensa_init.c b/ports/xtensa/xcc/src/xtensa_init.c
index 0359028c..298ca28d 100644
--- a/ports/xtensa/xcc/src/xtensa_init.c
+++ b/ports/xtensa/xcc/src/xtensa_init.c
@@ -46,17 +46,17 @@
#ifdef XT_RTOS_TIMER_INT
#ifndef XT_CLOCK_FREQ
-uint32_t _xt_tick_divisor = 0; /* cached number of cycles per tick */
+uint32_t xt_tick_divisor = 0; /* cached number of cycles per tick */
/*
Compute and initialize at run-time the tick divisor (the number of
processor clock cycles in an RTOS tick, used to set the tick timer).
Called when the processor clock frequency is not known at compile-time.
*/
-void _xt_tick_divisor_init(void)
+void xt_tick_divisor_init(void)
{
#ifdef XT_BOARD
- _xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC;
+ xt_tick_divisor = xtbsp_clock_freq_hz() / XT_TICK_PER_SEC;
#else
#error "No way to obtain processor clock frequency"
#endif /* XT_BOARD */
diff --git a/ports/xtensa/xcc/src/xtensa_vectors_xea3.S b/ports/xtensa/xcc/src/xtensa_vectors_xea3.S
index 5596a75c..9a25dcee 100644
--- a/ports/xtensa/xcc/src/xtensa_vectors_xea3.S
+++ b/ports/xtensa/xcc/src/xtensa_vectors_xea3.S
@@ -77,9 +77,6 @@
// The entry point vectors are common for call0 and windowed configurations.
//-----------------------------------------------------------------------------
- .extern _DoubleExceptionHandler
- .extern _xtos_exc_dispatch
-
.section .DispatchVector.text, "ax"
#if XCHAL_HAVE_VECBASE
.align 64 // 64-byte alignment needed when vecbase
@@ -105,7 +102,7 @@ _Reserved1:
.weak _DoubleExceptionVector
_DoubleExceptionVector:
- j _DoubleExceptionHandler
+ j _DoubleExceptionHandler // Externally defined
.org 9 // Reserved
.local _Reserved2
diff --git a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_context_restore.S b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_context_restore.S
index 56f748f9..19791662 100644
--- a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_context_restore.S
+++ b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A35-SMP/AC6 */
-/* 6.1.3 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Andres Mlinar, Microsoft Corporation */
@@ -63,7 +60,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 12-31-2020 Andres Mlinar Initial Version 6.1.3 */
+/* 12-31-2020 Andres Mlinar Initial Version 6.1.3 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -76,7 +76,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -101,13 +101,13 @@ _tx_thread_context_restore:
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x8, LSL #2] // Pickup system state
SUB w2, w2, #1 // Decrement the counter
- STR w2, [x3, x8, LSL #2] // Store the counter
+ STR w2, [x3, x8, LSL #2] // Store the counter
CMP w2, #0 // Was this the first interrupt?
BEQ __tx_thread_not_nested_restore // If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL
@@ -151,7 +151,7 @@ __tx_thread_not_nested_restore:
LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
LDR x2, [x3, x8, LSL #3] // Pickup actual execute thread pointer
CMP x0, x2 // Is the same thread highest priority?
- BEQ __tx_thread_no_preempt_restore // Same thread in the execute list,
+ BEQ __tx_thread_no_preempt_restore // Same thread in the execute list,
// no preemption needs to happen
LDR x3, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x3, #4] // Pickup the owning core
@@ -206,74 +206,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -328,8 +260,8 @@ _skip_fp_save:
CMP w2, #0 // Is it active?
BEQ __tx_thread_dont_save_ts // No, don't save it
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
diff --git a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_protect.S b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_protect.S
index 92c1702b..078d0320 100644
--- a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_protect.S
+++ b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Protection not in force, try to get it
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_unprotect.S b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_unprotect.S
index ff8df7db..ea1287ef 100644
--- a/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_unprotect.S
+++ b/ports_module/cortex_a35_smp/ac6/module_manager/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_context_restore.S b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_context_restore.S
index 886df701..94a31b94 100644
--- a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_context_restore.S
+++ b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A35-SMP/GNU */
-/* 6.1.3 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Andres Mlinar, Microsoft Corporation */
@@ -63,7 +60,10 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 12-31-2020 Andres Mlinar Initial Version 6.1.3 */
+/* 12-31-2020 Andres Mlinar Initial Version 6.1.3 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -76,7 +76,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -101,13 +101,13 @@ _tx_thread_context_restore:
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x8, LSL #2] // Pickup system state
SUB w2, w2, #1 // Decrement the counter
- STR w2, [x3, x8, LSL #2] // Store the counter
+ STR w2, [x3, x8, LSL #2] // Store the counter
CMP w2, #0 // Was this the first interrupt?
BEQ __tx_thread_not_nested_restore // If so, not a nested restore
/* Interrupts are nested. */
- /* Just recover the saved registers and return to the point of
+ /* Just recover the saved registers and return to the point of
interrupt. */
LDP x4, x5, [sp], #16 // Pickup saved SPSR/DAIF and ELR_EL
@@ -151,7 +151,7 @@ __tx_thread_not_nested_restore:
LDR x3, =_tx_thread_execute_ptr // Pickup address of execute thread ptr
LDR x2, [x3, x8, LSL #3] // Pickup actual execute thread pointer
CMP x0, x2 // Is the same thread highest priority?
- BEQ __tx_thread_no_preempt_restore // Same thread in the execute list,
+ BEQ __tx_thread_no_preempt_restore // Same thread in the execute list,
// no preemption needs to happen
LDR x3, =_tx_thread_smp_protection // Build address to protection structure
LDR w3, [x3, #4] // Pickup the owning core
@@ -206,74 +206,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -328,8 +260,8 @@ _skip_fp_save:
CMP w2, #0 // Is it active?
BEQ __tx_thread_dont_save_ts // No, don't save it
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
diff --git a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_protect.S b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_protect.S
index 59bf4ff4..5b800e71 100644
--- a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_protect.S
+++ b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/GCC */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Protection not in force, try to get it
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_unprotect.S b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_unprotect.S
index c53075ae..0e96fc3b 100644
--- a/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_unprotect.S
+++ b/ports_module/cortex_a35_smp/gnu/module_manager/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/GCC */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_module/cortex_m0+/ac6/inc/tx_port.h b/ports_module/cortex_m0+/ac6/inc/tx_port.h
index 3ebd67b4..200367a3 100644
--- a/ports_module/cortex_m0+/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m0+/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0+/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -473,7 +476,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
diff --git a/ports_module/cortex_m0+/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m0+/ac6/module_manager/src/tx_thread_schedule.S
index f76fa5f0..8ccb9f7a 100644
--- a/ports_module/cortex_m0+/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m0+/ac6/module_manager/src/tx_thread_schedule.S
@@ -30,7 +30,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M0+/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,6 +63,8 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -338,14 +340,23 @@ __tx_ts_restore:
CMP r0, #0
BEQ skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CMP r1, #0
+ MOVS r6, #0x8C
+ LDR r2, [r0, r6] // Pickup MPU region 5 address
+ CMP r2, #0
BEQ skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ MOVS r6, #0x10
+ BICS r2, r2, r6 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -359,7 +370,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m0+/gnu/inc/tx_port.h b/ports_module/cortex_m0+/gnu/inc/tx_port.h
index 06089b0f..286fb913 100644
--- a/ports_module/cortex_m0+/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m0+/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0+/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -138,13 +141,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -305,7 +308,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -487,7 +490,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
diff --git a/ports_module/cortex_m0+/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m0+/gnu/module_manager/src/tx_thread_schedule.S
index 42c3e8d9..f1f5872b 100644
--- a/ports_module/cortex_m0+/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m0+/gnu/module_manager/src/tx_thread_schedule.S
@@ -30,7 +30,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M0+/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,6 +63,8 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -338,14 +340,23 @@ __tx_ts_restore:
CMP r0, #0
BEQ skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CMP r1, #0
+ MOVS r6, #0x8C
+ LDR r2, [r0, r6] // Pickup MPU region 5 address
+ CMP r2, #0
BEQ skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ MOVS r6, #0x10
+ BICS r2, r2, r6 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -359,7 +370,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m0+/iar/example_build/startup.s b/ports_module/cortex_m0+/iar/example_build/startup.s
index bbe8142b..06de32fb 100644
--- a/ports_module/cortex_m0+/iar/example_build/startup.s
+++ b/ports_module/cortex_m0+/iar/example_build/startup.s
@@ -58,9 +58,9 @@ __vector_table
DC32 NMI_Handler ; NMI
DC32 HardFault_Handler ; HardFault
- DC32 MemManage_Handler ; MemManage
- DC32 0 ; BusFault
- DC32 0 ; UsageFault
+ DC32 HardFault_Handler ; MemManage
+ DC32 HardFault_Handler ; BusFault
+ DC32 HardFault_Handler ; UsageFault
DC32 0 ; 7
DC32 0 ; 8
DC32 0 ; 9
diff --git a/ports_module/cortex_m0+/iar/inc/tx_port.h b/ports_module/cortex_m0+/iar/inc/tx_port.h
index d2554f9c..41f06ec5 100644
--- a/ports_module/cortex_m0+/iar/inc/tx_port.h
+++ b/ports_module/cortex_m0+/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M0+/IAR */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -142,14 +145,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -361,7 +364,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -532,7 +535,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
diff --git a/ports_module/cortex_m0+/iar/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m0+/iar/module_manager/src/tx_thread_schedule.S
index c75596df..bc498210 100644
--- a/ports_module/cortex_m0+/iar/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m0+/iar/module_manager/src/tx_thread_schedule.S
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M0+/IAR */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,6 +69,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 01-31-2022 Scott Larson Initial Version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* change handler name, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -103,12 +106,8 @@ __tx_wait_here:
/* Memory Exception Handler. */
- PUBLIC MemManage_Handler
- PUBLIC BusFault_Handler
- PUBLIC UsageFault_Handler
-MemManage_Handler:
-BusFault_Handler:
-UsageFault_Handler:
+ PUBLIC HardFault_Handler
+HardFault_Handler:
CPSID i // Disable interrupts
@@ -326,14 +325,23 @@ __tx_ts_restore:
CMP r0, #0
BEQ skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CMP r1, #0
+ MOVS r6, #0x8C
+ LDR r2, [r0, r6] // Pickup MPU region 5 address
+ CMP r2, #0
BEQ skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ MOVS r6, #0x10
+ BICS r2, r2, r6 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -347,7 +355,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m23/ac6/inc/tx_port.h b/ports_module/cortex_m23/ac6/inc/tx_port.h
index cf82d164..b3c3d99b 100644
--- a/ports_module/cortex_m23/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m23/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M23/AC6 */
-/* 6.1.6 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 04-02-2021 Scott Larson Initial Version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -163,7 +166,7 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
+#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
@@ -175,17 +178,17 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -214,7 +217,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#endif
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -228,7 +231,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
@@ -284,7 +287,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
VOID (*tx_timer_module_expiration_function)(ULONG id);
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
@@ -362,9 +365,9 @@ static void _set_control(unsigned int _control)
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _get_control(); \
+ _tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _set_control(_tx_vfp_state);; \
+ _set_control(_tx_vfp_state);; \
}
#else
@@ -392,26 +395,26 @@ void _tx_vfp_access(void);
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _get_control(); \
+ _tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _set_control(_tx_vfp_state); \
+ _set_control(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _get_control(); \
+ _tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
_tx_vfp_access(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _get_control(); \
+ _tx_vfp_state = _get_control(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _set_control(_tx_vfp_state); \
+ _set_control(_tx_vfp_state); \
} \
} \
} \
@@ -481,7 +484,7 @@ void _tx_vfp_access(void);
/* Define the get system state macro. */
-
+
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _get_ipsr())
@@ -507,15 +510,12 @@ extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
-/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
+/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
-
+
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
- lowest bit set. */
#ifndef TX_DISABLE_INLINE
@@ -557,7 +557,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
{
was_masked = __disable_irq();
@@ -572,8 +572,8 @@ unsigned int was_masked;
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/AC6 Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/AC6 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m23/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m23/ac6/module_manager/src/tx_thread_schedule.S
index 67fe291e..09d4658f 100644
--- a/ports_module/cortex_m23/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m23/ac6/module_manager/src/tx_thread_schedule.S
@@ -30,7 +30,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M23/AC6 */
-/* 6.1.6 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,6 +63,8 @@
/* DATE NAME DESCRIPTION */
/* */
/* 04-02-2021 Scott Larson Initial Version 6.1.6 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -373,12 +375,20 @@ _skip_secure_restore:
LDR r0, [r1, r2] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+ MOVS r2, #0x74 // Index of MPU data region
+ LDR r2, [r0, r2] // Pickup MPU data region address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -392,7 +402,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m23/gnu/inc/tx_port.h b/ports_module/cortex_m23/gnu/inc/tx_port.h
index eca300d7..42486b17 100644
--- a/ports_module/cortex_m23/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m23/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M23/GNU */
-/* 6.1.7 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -58,6 +58,9 @@
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -172,7 +175,7 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
#endif
-#ifndef TX_TIMER_THREAD_PRIORITY
+#ifndef TX_TIMER_THREAD_PRIORITY
#define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
#endif
@@ -184,17 +187,17 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
/* Define the clock source for trace event entry time stamp. The following two item are port specific.
- For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
+ For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -223,7 +226,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
#endif
-/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
+/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -237,7 +240,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
/* Define the TX_THREAD control block extensions for this port. The main reason
- for the multiple macros is so that backward compatibility can be maintained with
+ for the multiple macros is so that backward compatibility can be maintained with
existing ThreadX kernel awareness modules. */
#define TX_THREAD_EXTENSION_0
@@ -293,7 +296,7 @@ ULONG _tx_misra_time_stamp_get(VOID);
VOID (*tx_timer_module_expiration_function)(ULONG id);
-/* Define the user extension field of the thread control block. Nothing
+/* Define the user extension field of the thread control block. Nothing
additional is needed for this port so it is defined as white space. */
#ifndef TX_THREAD_USER_EXTENSION
@@ -359,7 +362,7 @@ inline static unsigned int _get_ipsr(void)
/* Define the get system state macro. */
-
+
#ifndef TX_THREAD_GET_SYSTEM_STATE
#ifndef TX_MISRA_ENABLE
#define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _get_ipsr())
@@ -385,15 +388,12 @@ extern void _tx_thread_secure_stack_initialize(void);
#define TX_INITIALIZE_KERNEL_ENTER_EXTENSION _tx_thread_secure_stack_initialize();
#endif
-/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
+/* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
prevent early scheduling on Cortex-M parts. */
-
+
#define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
-/* Determine if the ARM architecture has the CLZ instruction. This is available on
- architectures v5 and above. If available, redefine the macro for calculating the
- lowest bit set. */
#ifndef TX_DISABLE_INLINE
@@ -449,7 +449,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_get_ipsr() == 0)
{
interrupt_save = __get_primask_value();
@@ -482,8 +483,8 @@ unsigned int interrupt_save;
/* Define the version ID of ThreadX. This may be utilized by the application. */
#ifdef TX_THREAD_INIT
-CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/GNU Version 6.1.9 *";
+CHAR _tx_version_id[] =
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/GNU Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m23/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m23/gnu/module_manager/src/tx_thread_schedule.S
index c60919ff..7b640e6f 100644
--- a/ports_module/cortex_m23/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m23/gnu/module_manager/src/tx_thread_schedule.S
@@ -26,7 +26,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M23/GNU */
-/* 6.1.6 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -59,6 +59,8 @@
/* DATE NAME DESCRIPTION */
/* */
/* 04-02-2021 Scott Larson Initial Version 6.1.6 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -369,12 +371,20 @@ _skip_secure_restore:
LDR r0, [r1, r2] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+ MOVS r2, #0x74 // Index of MPU data region
+ LDR r2, [r0, r2] // Pickup MPU data region address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -388,7 +398,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m23/iar/inc/tx_port.h b/ports_module/cortex_m23/iar/inc/tx_port.h
index 29a4b245..418ffa05 100644
--- a/ports_module/cortex_m23/iar/inc/tx_port.h
+++ b/ports_module/cortex_m23/iar/inc/tx_port.h
@@ -25,8 +25,8 @@
/* */
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
-/* tx_port.h ARMv8-M */
-/* 6.1.6 */
+/* tx_port.h Cortex-M23/IAR */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 04-02-2021 Scott Larson Initial Version 6.1.6 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -179,7 +182,7 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
#endif
-/* Define various constants for the ThreadX Cortex-M port. */
+/* Define various constants for the ThreadX Cortex-M23 port. */
#define TX_INT_DISABLE 1 /* Disable interrupts */
#define TX_INT_ENABLE 0 /* Enable interrupts */
@@ -189,14 +192,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -464,7 +467,7 @@ __attribute__( ( always_inline ) ) static inline void __set_CONTROL(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -644,7 +647,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
unsigned int interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_primask_value();
@@ -665,7 +668,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
@@ -701,7 +704,7 @@ VOID _tx_thread_interrupt_restore(UIN
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Modules ARMv8-M Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/IAR Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m23/iar/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m23/iar/module_manager/src/tx_thread_schedule.s
index e523070f..9e6d58bb 100644
--- a/ports_module/cortex_m23/iar/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m23/iar/module_manager/src/tx_thread_schedule.s
@@ -42,7 +42,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M23/IAR */
-/* 6.1.6 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -75,6 +75,8 @@
/* DATE NAME DESCRIPTION */
/* */
/* 04-02-2021 Scott Larson Initial Version 6.1.6 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -370,12 +372,20 @@ _skip_secure_restore:
LDR r0, [r1, r2] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
MOV r8, r1 // Copy thread ptr
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+ MOVS r2, #0x74 // Index of MPU data region
+ LDR r2, [r0, r2] // Pickup MPU data region address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOVS r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Initialize loop to configure MPU registers
- // Order doesn't matter, so txm_module_instance_mpu_registers[0]
- // will be in region 7 and txm_module_instance_mpu_registers[7] will be in region 0.
MOVS r3, #0x64 // Index of MPU register settings in thread control block
ADD r0, r0, r3 // Build address of MPU register start in thread control block
MOVS r5, #0 // Select region 0
@@ -389,7 +399,7 @@ _tx_mpu_loop:
ADDS r5, r5, #1 // Increment to next region
CMP r5, #8 // Check if all regions have been set
BNE _tx_mpu_loop
-
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOVS r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m3/ac5/inc/tx_port.h b/ports_module/cortex_m3/ac5/inc/tx_port.h
index a693d09e..0374d55e 100644
--- a/ports_module/cortex_m3/ac5/inc/tx_port.h
+++ b/ports_module/cortex_m3/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -282,7 +285,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -435,7 +438,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
was_masked = __disable_irq();
@@ -458,7 +461,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m3/ac5/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m3/ac5/module_manager/src/tx_thread_schedule.s
index 1048802a..6f0f91df 100644
--- a/ports_module/cortex_m3/ac5/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m3/ac5/module_manager/src/tx_thread_schedule.s
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,13 +67,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,7 +127,12 @@ __tx_wait_here
EXPORT MemManage_Handler
MemManage_Handler
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -208,7 +215,12 @@ MemManage_Handler
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -226,12 +238,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -276,7 +298,12 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -285,7 +312,12 @@ __tx_ts_new
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -293,7 +325,12 @@ __tx_ts_wait
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -310,7 +347,12 @@ __tx_ts_restore
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -346,27 +388,34 @@ __tx_ts_restore
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m3/ac6/inc/tx_port.h b/ports_module/cortex_m3/ac6/inc/tx_port.h
index 6f4ebb5b..cd4ce994 100644
--- a/ports_module/cortex_m3/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m3/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -467,7 +470,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -497,7 +501,7 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+/* Define FPU extension for the Cortex-M3. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
@@ -508,7 +512,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/AC6 Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m3/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m3/ac6/module_manager/src/tx_thread_schedule.S
index 081e5c35..4cf1df38 100644
--- a/ports_module/cortex_m3/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m3/ac6/module_manager/src/tx_thread_schedule.S
@@ -42,7 +42,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,13 +69,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
@@ -538,14 +587,14 @@ _tx_no_lazy_clear:
#endif
/* Copy kernel hardware stack to module thread stack. */
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
+ LDM r3!, {r1-r2} // Get r0, r1 from kernel stack
+ STM r0!, {r1-r2} // Insert r0, r1 into thread stack
+ LDM r3!, {r1-r2} // Get r2, r3 from kernel stack
+ STM r0!, {r1-r2} // Insert r2, r3 into thread stack
+ LDM r3!, {r1-r2} // Get r12, lr from kernel stack
+ STM r0!, {r1-r2} // Insert r12, lr into thread stack
+ LDM r3!, {r1-r2} // Get pc, xpsr from kernel stack
+ STM r0!, {r1-r2} // Insert pc, xpsr into thread stack
SUB r0, r0, #32 // Subtract 32 to get back to top of stack
MSR PSP, r0 // Set thread stack pointer
diff --git a/ports_module/cortex_m3/gnu/inc/tx_port.h b/ports_module/cortex_m3/gnu/inc/tx_port.h
index a3f91e24..b4197c96 100644
--- a/ports_module/cortex_m3/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m3/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/GNU */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -461,7 +464,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -491,8 +495,8 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
- thread. This is for legacy only, and not needed any longer. */
+/* Define FPU extension for the Cortex-M3. Each is assumed to be called in the context of the executing
+ thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
void tx_thread_fpu_disable(void);
@@ -502,7 +506,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M3/GNU Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m3/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m3/gnu/module_manager/src/tx_thread_schedule.S
index b19b6cc8..e5d101b5 100644
--- a/ports_module/cortex_m3/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m3/gnu/module_manager/src/tx_thread_schedule.S
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,7 +67,6 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
@@ -76,6 +75,9 @@
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m3/iar/inc/tx_port.h b/ports_module/cortex_m3/iar/inc/tx_port.h
index c9c13b3c..6ed2c186 100644
--- a/ports_module/cortex_m3/iar/inc/tx_port.h
+++ b/ports_module/cortex_m3/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M3/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -127,14 +130,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -325,7 +328,7 @@ void _tx_misra_vfp_touch(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -476,7 +479,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
@@ -488,7 +491,7 @@ __istate_t interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+/* Define FPU extension for the Cortex-M3. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
diff --git a/ports_module/cortex_m3/iar/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m3/iar/module_manager/src/tx_thread_schedule.s
index d6bd477c..6326b800 100644
--- a/ports_module/cortex_m3/iar/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m3/iar/module_manager/src/tx_thread_schedule.s
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M3/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,13 +63,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,8 +127,12 @@ __tx_wait_here:
MemManage_Handler:
BusFault_Handler:
UsageFault_Handler:
-
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -209,7 +215,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -227,12 +238,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -277,7 +298,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities <= TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -286,7 +312,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -294,7 +325,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -311,7 +347,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -347,27 +388,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m33/ac6/inc/tx_port.h b/ports_module/cortex_m33/ac6/inc/tx_port.h
index eb4a7f5f..e3547602 100644
--- a/ports_module/cortex_m33/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m33/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -61,13 +61,16 @@
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -429,9 +432,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -456,26 +459,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -657,7 +660,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports_module/cortex_m33/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m33/ac6/module_manager/src/tx_thread_schedule.S
index 8ef9e2cb..c196f75a 100644
--- a/ports_module/cortex_m33/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m33/ac6/module_manager/src/tx_thread_schedule.S
@@ -29,8 +29,8 @@
/* */
/* FUNCTION RELEASE */
/* */
-/* _tx_thread_schedule Cortex-M33/MPU/AC6 */
-/* 6.1.7 */
+/* _tx_thread_schedule Cortex-M33/AC6 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,6 +69,9 @@
/* 06-02-2021 Scott Larson Fixed extended stack handling */
/* when calling kernel APIs, */
/* resulting in version 6.1.7 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -128,7 +131,12 @@ MemManage_Handler:
.thumb_func
BusFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -195,7 +203,7 @@ BusFault_Handler:
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
CPSID i // Disable interrupts
BL _tx_execution_thread_exit // Call the thread exit function
@@ -211,7 +219,12 @@ BusFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
#ifdef TX_SINGLE_MODE_SECURE
LDR lr, =0xFFFFFFFD // Exception return to secure
#else
@@ -235,12 +248,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -296,7 +319,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -305,7 +333,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -313,7 +346,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -330,7 +368,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -378,9 +421,17 @@ _skip_secure_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+ LDR r2, [r0, #0x74] // Pickup MPU address of data region
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
LDR r2, =0xE000ED98 // Get region register
@@ -393,6 +444,7 @@ _skip_secure_restore:
STR r2, [r3] // Set region to 4
LDM r0, {r2-r9} // Load second four MPU regions
STM r1, {r2-r9} // Store second four MPU regions
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m33/gnu/inc/tx_port.h b/ports_module/cortex_m33/gnu/inc/tx_port.h
index eb4a7f5f..e3547602 100644
--- a/ports_module/cortex_m33/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m33/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -61,13 +61,16 @@
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -429,9 +432,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -456,26 +459,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -657,7 +660,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports_module/cortex_m33/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m33/gnu/module_manager/src/tx_thread_schedule.S
index 034f8b17..ff454e9c 100644
--- a/ports_module/cortex_m33/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m33/gnu/module_manager/src/tx_thread_schedule.S
@@ -28,8 +28,8 @@
/* */
/* FUNCTION RELEASE */
/* */
-/* _tx_thread_schedule Cortex-M33/MPU/GNU */
-/* 6.1.10 */
+/* _tx_thread_schedule Cortex-M33/GNU */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -70,6 +70,9 @@
/* resulting in version 6.1.7 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -129,7 +132,12 @@ MemManage_Handler:
.thumb_func
BusFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -196,7 +204,7 @@ BusFault_Handler:
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
CPSID i // Disable interrupts
BL _tx_execution_thread_exit // Call the thread exit function
@@ -212,7 +220,12 @@ BusFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
#ifdef TX_SINGLE_MODE_SECURE
LDR lr, =0xFFFFFFFD // Exception return to secure
#else
@@ -236,12 +249,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -297,7 +320,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -306,7 +334,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -314,7 +347,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -331,7 +369,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -379,9 +422,17 @@ _skip_secure_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+ LDR r2, [r0, #0x74] // Pickup MPU address of data region
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
LDR r2, =0xE000ED98 // Get region register
@@ -394,6 +445,7 @@ _skip_secure_restore:
STR r2, [r3] // Set region to 4
LDM r0, {r2-r9} // Load second four MPU regions
STM r1, {r2-r9} // Store second four MPU regions
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m33/iar/inc/tx_port.h b/ports_module/cortex_m33/iar/inc/tx_port.h
index eb4a7f5f..e3547602 100644
--- a/ports_module/cortex_m33/iar/inc/tx_port.h
+++ b/ports_module/cortex_m33/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M33 */
-/* 6.1.10 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -61,13 +61,16 @@
/* added symbol to enable */
/* stack error handler, */
/* resulting in version 6.1.7 */
-/* 10-15-2021 Scott Larson Modified comment(s), improved */
+/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* 01-31-2022 Scott Larson Modified comment(s), unified */
/* this file across compilers, */
/* fixed predefined macro, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -196,14 +199,14 @@ UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -429,9 +432,9 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
}
#else
@@ -456,26 +459,26 @@ __attribute__( ( always_inline ) ) static inline void _tx_control_set(ULONG cont
if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
ULONG _tx_vfp_state; \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
TX_VFP_TOUCH(); \
if (_tx_vfp_state == ((ULONG) 0)) \
{ \
- _tx_vfp_state = _tx_control_get(); \
+ _tx_vfp_state = _tx_control_get(); \
_tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
- _tx_control_set(_tx_vfp_state); \
+ _tx_control_set(_tx_vfp_state); \
} \
} \
} \
@@ -657,7 +660,7 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
UINT interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_tx_ipsr_get() == 0)
{
interrupt_save = __get_interrupt_posture();
diff --git a/ports_module/cortex_m33/iar/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m33/iar/module_manager/src/tx_thread_schedule.s
index a584c2b7..e4d4bfd3 100644
--- a/ports_module/cortex_m33/iar/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m33/iar/module_manager/src/tx_thread_schedule.s
@@ -42,7 +42,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M33/IAR */
-/* 6.1.8 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -81,6 +81,9 @@
/* 06-02-2021 Scott Larson Fixed extended stack handling */
/* when calling kernel APIs, */
/* resulting in version 6.1.7 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -132,7 +135,12 @@ __tx_wait_here:
MemManage_Handler:
BusFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -199,7 +207,7 @@ BusFault_Handler:
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
CPSID i // Disable interrupts
BL _tx_execution_thread_exit // Call the thread exit function
@@ -215,7 +223,12 @@ BusFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
#ifdef TX_SINGLE_MODE_SECURE
LDR lr, =0xFFFFFFFD // Exception return to secure
#else
@@ -232,12 +245,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -293,7 +316,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -302,7 +330,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -310,7 +343,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -327,7 +365,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -375,9 +418,17 @@ _skip_secure_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+ LDR r2, [r0, #0x74] // Pickup MPU address of data region
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #2 // Select MPU region 2
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 2
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 2
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
LDR r2, =0xE000ED98 // Get region register
@@ -390,6 +441,7 @@ _skip_secure_restore:
STR r2, [r3] // Set region to 4
LDM r0, {r2-r9} // Load second four MPU regions
STM r1, {r2-r9} // Store second four MPU regions
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
@@ -412,8 +464,8 @@ _skip_vfp_restore:
SVC_Handler:
TST lr, #0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
- MRSEQ r0, MSP // Get MSP
- MRSNE r0, PSP // Get PSP
+ MRSEQ r0, MSP // Get MSP if return stack is MSP
+ MRSNE r0, PSP // Get PSP if return stack is PSP
LDR r1, [r0,#24] // Load saved PC from stack
LDRB r2, [r1,#-2] // Load SVC number
diff --git a/ports_module/cortex_m4/ac5/inc/tx_port.h b/ports_module/cortex_m4/ac5/inc/tx_port.h
index 78434fb6..3aba89f0 100644
--- a/ports_module/cortex_m4/ac5/inc/tx_port.h
+++ b/ports_module/cortex_m4/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -282,7 +285,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -435,7 +438,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
was_masked = __disable_irq();
@@ -458,7 +461,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m4/ac5/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m4/ac5/module_manager/src/tx_thread_schedule.s
index bd2f6f25..11943fcf 100644
--- a/ports_module/cortex_m4/ac5/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m4/ac5/module_manager/src/tx_thread_schedule.s
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,13 +67,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,7 +127,12 @@ __tx_wait_here
EXPORT MemManage_Handler
MemManage_Handler
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -208,7 +215,12 @@ MemManage_Handler
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -226,12 +238,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -276,7 +298,12 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -285,7 +312,12 @@ __tx_ts_new
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -293,7 +325,12 @@ __tx_ts_wait
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -310,7 +347,12 @@ __tx_ts_restore
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -346,27 +388,34 @@ __tx_ts_restore
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m4/ac6/inc/tx_port.h b/ports_module/cortex_m4/ac6/inc/tx_port.h
index c4333fc6..5e0f6a91 100644
--- a/ports_module/cortex_m4/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m4/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -467,7 +470,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -497,7 +501,7 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+/* Define FPU extension for the Cortex-M4. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
@@ -508,7 +512,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/AC6 Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m4/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m4/ac6/module_manager/src/tx_thread_schedule.S
index 724ed802..d56fa398 100644
--- a/ports_module/cortex_m4/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m4/ac6/module_manager/src/tx_thread_schedule.S
@@ -42,7 +42,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,13 +69,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
@@ -538,14 +587,14 @@ _tx_no_lazy_clear:
#endif
/* Copy kernel hardware stack to module thread stack. */
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
+ LDM r3!, {r1-r2} // Get r0, r1 from kernel stack
+ STM r0!, {r1-r2} // Insert r0, r1 into thread stack
+ LDM r3!, {r1-r2} // Get r2, r3 from kernel stack
+ STM r0!, {r1-r2} // Insert r2, r3 into thread stack
+ LDM r3!, {r1-r2} // Get r12, lr from kernel stack
+ STM r0!, {r1-r2} // Insert r12, lr into thread stack
+ LDM r3!, {r1-r2} // Get pc, xpsr from kernel stack
+ STM r0!, {r1-r2} // Insert pc, xpsr into thread stack
SUB r0, r0, #32 // Subtract 32 to get back to top of stack
MSR PSP, r0 // Set thread stack pointer
diff --git a/ports_module/cortex_m4/gnu/inc/tx_port.h b/ports_module/cortex_m4/gnu/inc/tx_port.h
index 986d5051..ed3f0984 100644
--- a/ports_module/cortex_m4/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m4/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/GNU */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -461,7 +464,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -491,8 +495,8 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
- thread. This is for legacy only, and not needed any longer. */
+/* Define FPU extension for the Cortex-M4. Each is assumed to be called in the context of the executing
+ thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
void tx_thread_fpu_disable(void);
@@ -502,7 +506,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M4/GNU Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m4/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m4/gnu/module_manager/src/tx_thread_schedule.S
index 1377997e..948b153c 100644
--- a/ports_module/cortex_m4/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m4/gnu/module_manager/src/tx_thread_schedule.S
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,7 +67,6 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
@@ -76,6 +75,9 @@
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m4/iar/inc/tx_port.h b/ports_module/cortex_m4/iar/inc/tx_port.h
index 2ade11b9..2f620957 100644
--- a/ports_module/cortex_m4/iar/inc/tx_port.h
+++ b/ports_module/cortex_m4/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M4/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -127,14 +130,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -325,7 +328,7 @@ void _tx_misra_vfp_touch(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -476,7 +479,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
@@ -488,7 +491,7 @@ __istate_t interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+/* Define FPU extension for the Cortex-M4. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
diff --git a/ports_module/cortex_m4/iar/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m4/iar/module_manager/src/tx_thread_schedule.s
index 56da6a11..92840961 100644
--- a/ports_module/cortex_m4/iar/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m4/iar/module_manager/src/tx_thread_schedule.s
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M4/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,13 +63,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,8 +127,12 @@ __tx_wait_here:
MemManage_Handler:
BusFault_Handler:
UsageFault_Handler:
-
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -209,7 +215,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -227,12 +238,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -277,7 +298,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -286,7 +312,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -294,7 +325,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -311,7 +347,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -347,27 +388,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m7/ac5/inc/tx_port.h b/ports_module/cortex_m7/ac5/inc/tx_port.h
index ff5a57e3..9aca7f4b 100644
--- a/ports_module/cortex_m7/ac5/inc/tx_port.h
+++ b/ports_module/cortex_m7/ac5/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -282,7 +285,7 @@ void _tx_vfp_access(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -435,7 +438,7 @@ unsigned int was_masked;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (_ipsr == 0)
{
was_masked = __disable_irq();
@@ -458,7 +461,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC5 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC5 Version 6.1.11 *";
#else
#ifdef TX_MISRA_ENABLE
extern CHAR _tx_version_id[100];
diff --git a/ports_module/cortex_m7/ac5/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m7/ac5/module_manager/src/tx_thread_schedule.s
index 016cdcae..150f9818 100644
--- a/ports_module/cortex_m7/ac5/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m7/ac5/module_manager/src/tx_thread_schedule.s
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/AC5 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,13 +67,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,7 +127,12 @@ __tx_wait_here
EXPORT MemManage_Handler
MemManage_Handler
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -208,7 +215,12 @@ MemManage_Handler
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -226,12 +238,22 @@ __tx_ts_handler
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -276,7 +298,12 @@ __tx_ts_new
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -285,7 +312,12 @@ __tx_ts_new
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -293,7 +325,12 @@ __tx_ts_wait
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -310,7 +347,12 @@ __tx_ts_restore
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -346,27 +388,34 @@ __tx_ts_restore
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m7/ac6/inc/tx_port.h b/ports_module/cortex_m7/ac6/inc/tx_port.h
index c5f6fdd5..56f2531f 100644
--- a/ports_module/cortex_m7/ac6/inc/tx_port.h
+++ b/ports_module/cortex_m7/ac6/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -467,7 +470,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -497,7 +501,7 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
@@ -508,7 +512,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC6 Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/AC6 Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m7/ac6/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m7/ac6/module_manager/src/tx_thread_schedule.S
index 106d7e5e..c6b66e54 100644
--- a/ports_module/cortex_m7/ac6/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m7/ac6/module_manager/src/tx_thread_schedule.S
@@ -42,7 +42,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -69,13 +69,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
@@ -538,14 +587,14 @@ _tx_no_lazy_clear:
#endif
/* Copy kernel hardware stack to module thread stack. */
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
- LDM r3!, {r1-r2}
- STM r0!, {r1-r2}
+ LDM r3!, {r1-r2} // Get r0, r1 from kernel stack
+ STM r0!, {r1-r2} // Insert r0, r1 into thread stack
+ LDM r3!, {r1-r2} // Get r2, r3 from kernel stack
+ STM r0!, {r1-r2} // Insert r2, r3 into thread stack
+ LDM r3!, {r1-r2} // Get r12, lr from kernel stack
+ STM r0!, {r1-r2} // Insert r12, lr into thread stack
+ LDM r3!, {r1-r2} // Get pc, xpsr from kernel stack
+ STM r0!, {r1-r2} // Insert pc, xpsr into thread stack
SUB r0, r0, #32 // Subtract 32 to get back to top of stack
MSR PSP, r0 // Set thread stack pointer
diff --git a/ports_module/cortex_m7/gnu/inc/tx_port.h b/ports_module/cortex_m7/gnu/inc/tx_port.h
index b4e27ddf..0d9feee7 100644
--- a/ports_module/cortex_m7/gnu/inc/tx_port.h
+++ b/ports_module/cortex_m7/gnu/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/GNU */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -123,13 +126,13 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_TRACE_TIME_MASK
#define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
@@ -291,7 +294,7 @@ __attribute__( ( always_inline ) ) static inline void __set_control(ULONG contro
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -461,7 +464,8 @@ __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_i
{
unsigned int interrupt_save;
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ /* Set PendSV to invoke ThreadX scheduler. */
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_ipsr_value() == 0)
{
interrupt_save = __get_primask_value();
@@ -491,8 +495,8 @@ unsigned int interrupt_save;
#endif
-/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
- thread. This is for legacy only, and not needed any longer. */
+/* Define FPU extension for the Cortex-M7. Each is assumed to be called in the context of the executing
+ thread. These are no longer needed, but are preserved for backward compatibility only. */
void tx_thread_fpu_enable(void);
void tx_thread_fpu_disable(void);
@@ -502,7 +506,7 @@ void tx_thread_fpu_disable(void);
#ifdef TX_THREAD_INIT
CHAR _tx_version_id[] =
- "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/GNU Version 6.1.9 *";
+ "Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M7/GNU Version 6.1.11 *";
#else
extern CHAR _tx_version_id[];
#endif
diff --git a/ports_module/cortex_m7/gnu/module_manager/src/tx_thread_schedule.S b/ports_module/cortex_m7/gnu/module_manager/src/tx_thread_schedule.S
index 2f472d6d..e3401782 100644
--- a/ports_module/cortex_m7/gnu/module_manager/src/tx_thread_schedule.S
+++ b/ports_module/cortex_m7/gnu/module_manager/src/tx_thread_schedule.S
@@ -40,7 +40,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/GNU */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -67,7 +67,6 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
@@ -76,6 +75,9 @@
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
/* 01-31-2022 Scott Larson Fixed predefined macro name, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -135,7 +137,12 @@ BusFault_Handler:
.thumb_func
UsageFault_Handler:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -197,7 +204,7 @@ UsageFault_Handler:
LDR r0, =0xE000EF34 // Cleanup FPU context: Load FPCCR address
LDR r1, [r0] // Load FPCCR
BIC r1, r1, #1 // Clear the lazy preservation active bit
- STR r1, [r0] // Store the value
+ STR r1, [r0] // Save FPCCR
#endif
BL _txm_module_manager_memory_fault_handler // Call memory manager fault handler
@@ -218,7 +225,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -239,12 +251,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -289,7 +311,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -298,7 +325,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -306,7 +338,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -323,7 +360,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -359,27 +401,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/board.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/board.h
deleted file mode 100644
index e30a775d..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/board.h
+++ /dev/null
@@ -1,763 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \page samv7_Xplained_ultra_board_desc SAM V71 Xplained Ultra - Board
- * Description
- *
- * \section Purpose
- *
- * This file is dedicated to describe the SAM V71 Xplained Ultra board.
- *
- * \section Contents
- *
- * - For SAM V71 Xplained Ultra board information, see
- * \subpage samv7_Xplained_ultra_board_info.
- * - For operating frequency information, see \subpage samv7_Xplained_ultra_opfreq.
- * - For using portable PIO definitions, see \subpage samv7_Xplained_ultra_piodef.
- * - For using GMAC PIO definitions, see \subpage samv7_Xplained_ultra_gmac.
- * - For using ISI definitions, see \subpage samv7_Xplained_ultra_isi.
- * - For on-board memories, see \subpage samv7_Xplained_ultra_mem.
- * - Several USB definitions are included here,
- * see \subpage samv7_Xplained_ultra_usb.
- * - For External components, see \subpage samv7_Xplained_ultra_extcomp.
- * - For Individual chip definition, see \subpage samv7_Xplained_ultra_chipdef.
- *
- * To get more software details and the full list of parameters related to the
- * SAM V71 Xplained Ultra board configuration, please have a look at the source
- * file:
- * \ref board.h\n
- *
- * \section Usage
- *
- * - The code for booting the board is provided by board_cstartup_xxx.c and
- * board_lowlevel.c.
- * - For using board PIOs, board characteristics (clock, etc.) and external
- * components, see board.h.
- * - For manipulating memories, see board_memories.h.
- *
- * This file can be used as a template and modified to fit a custom board, with
- * specific PIOs usage or memory connections.
- */
-
-/**
- * \file board.h
- *
- * Definition of SAM V71 Xplained Ultra board characteristics, PIOs and
- * external components interface.
- */
-
-#ifndef _BOARD_H_
-#define _BOARD_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "chip.h"
-
-#include "include/board_lowlevel.h"
-#include "include/board_memories.h"
-#include "include/led.h"
-#include "include/gmii.h"
-#include "include/gmacb_phy.h"
-#include "include/dbg_console.h"
-#include "include/bmp.h"
-#include "include/lcdd.h"
-#include "include/ili9488.h"
-#include "include/ili9488_reg.h"
-#include "include/ili9488_spi.h"
-#include "include/ili9488_ebi.h"
-#include "include/ili9488_dma.h"
-#include "include/ili9488_spi_dma.h"
-#include "include/ili9488_ebi_dma.h"
-#include "include/frame_buffer.h"
-#include "include/lcd_color.h"
-#include "include/lcd_draw.h"
-#include "include/lcd_font10x14.h"
-#include "include/lcd_font.h"
-#include "include/lcd_gimp_image.h"
-#include "include/rtc_calib.h"
-#include "include/wm8904.h"
-#include "include/cs2100.h"
-#include "include/s25fl1.h"
-#include "include/image_sensor_inf.h"
-#include "include/iso7816_4.h"
-
-#if defined ( __GNUC__ )
-#include "include/syscalls.h"
-#endif
-/*----------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_board_info "SAM V71 Xplained Ultra - Board informations"
- * This page lists several definition related to the board description.
- *
- * \section Definitions
- * - \ref BOARD_NAME
- */
-
-/** Name of the board */
-#define BOARD_NAME "SAM V71 Xplained Ultra"
-#define NO_PUSHBUTTON
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_opfreq "SAM V71 Xplained Ultra - Operating frequencies"
- * This page lists several definition related to the board operating frequency
- * (when using the initialization done by board_lowlevel.c).
- *
- * \section Definitions
- * - \ref BOARD_MAINOSC
- * - \ref BOARD_MCK
- */
-
-/** Frequency of the board main oscillator */
-#define BOARD_MAINOSC 12000000
-
-/** Master clock frequency (when using board_lowlevel.c) */
-
-#ifdef MCK_123MHZ
-#define BOARD_MCK 123000000
-#else
-#define BOARD_MCK 150000000
-#endif
-
-#if (BOARD_MCK==132000000 )
-
-#define PLL_MUL 0x16
-#define PLL_DIV 0x01
-
-#else // 300MHz(PCK) and 150MHz(MCK) by default
-
-#define PLL_MUL 0x19
-#define PLL_DIV 0x01
-
-#endif
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_piodef "SAM V71 Xplained Ultra - PIO definitions"
- * This pages lists all the PIOs definitions contained in board.h. The constants
- * are named using the following convention: PIN_* for a constant which defines
- * a single Pin instance (but may include several PIOs sharing the same
- * controller), and PINS_* for a list of Pin instances.
- *
- * UART0
- * - \ref PINS_UART0
- *
- * UART4
- * - \ref PINS_UART4
- *
- * LEDs
- * - \ref PIN_LED_0
- * - \ref PIN_LED_1
- * - \ref PINS_LEDS
- *
- * Push buttons
- * - \ref PIN_PUSHBUTTON_0
- * - \ref PIN_PUSHBUTTON_1
- * - \ref PINS_PUSHBUTTONS
- * - \ref PUSHBUTTON_BP0
- * - \ref PUSHBUTTON_BP1
- *
- * PWMC
- * - \ref PIN_PWMC_PWMH0
- * - \ref PIN_PWMC_PWMH1
- * - \ref PIN_PWM_LED0
- * - \ref PIN_PWM_LED1
- * - \ref CHANNEL_PWM_LED0
- * - \ref CHANNEL_PWM_LED1
- *
- * SPI
- * - \ref PIN_SPI_MISO
- * - \ref PIN_SPI_MOSI
- * - \ref PIN_SPI_SPCK
- * - \ref PINS_SPI
- *
- * PCK0
- * - \ref PIN_PCK0
- * - \ref PIN_PCK1
- * - \ref PIN_PCK2
- *
- * PIO PARALLEL CAPTURE
- * - \ref PIN_PIODCEN1
- * - \ref PIN_PIODCEN2
- *
- * TWI
- * - \ref TWI_V3XX
- * - \ref PIN_TWI_TWD0
- * - \ref PIN_TWI_TWCK0
- * - \ref PINS_TWI0
- * - \ref PIN_TWI_TWD1
- * - \ref PIN_TWI_TWCK1
- * - \ref PINS_TWI1
- *
- * USART0
- * - \ref PIN_USART0_RXD
- * - \ref PIN_USART0_TXD
- * - \ref PIN_USART0_CTS
- * - \ref PIN_USART0_RTS
- * - \ref PIN_USART0_SCK
- *
- * USART1
- * - \ref PIN_USART1_RXD
- * - \ref PIN_USART1_TXD
- * - \ref PIN_USART1_CTS
- * - \ref PIN_USART1_RTS
- * - \ref PIN_USART1_SCK
- *
- * USART2
- * - \ref PIN_USART2_RXD
- * - \ref PIN_USART2_TXD
- * - \ref PIN_USART2_CTS
- * - \ref PIN_USART2_RTS
- * - \ref PIN_USART2_SCK
- *
- * SSC
- * - \ref PIN_SSC_TD
- * - \ref PIN_SSC_TK
- * - \ref PIN_SSC_TF
- * - \ref PIN_SSC_RD
- * - \ref PIN_SSC_RK
- * - \ref PIN_SSC_RF
- * - \ref PIN_SSC_TD
- * - \ref PINS_SSC_CODEC
- *
- * MCAN
- * - \ref PIN_MCAN0_TXD
- * - \ref PIN_MCAN0_RXD
- * - \ref PIN_MCAN1_TXD
- * - \ref PIN_MCAN1_RXD
- */
-
-/** SSC pin Transmitter Data (TD) */
-#define PIN_SSC_TD {PIO_PD26B_TD, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SSC pin Transmitter Clock (TK) */
-#define PIN_SSC_TK {PIO_PB1D_TK, PIOB, ID_PIOB, PIO_PERIPH_D, PIO_DEFAULT}
-/** SSC pin Transmitter FrameSync (TF) */
-#define PIN_SSC_TF {PIO_PB0D_TF, PIOB, ID_PIOB, PIO_PERIPH_D, PIO_DEFAULT}
-/** SSC pin RD */
-#define PIN_SSC_RD {PIO_PA10C_RD, PIOA, ID_PIOA, PIO_PERIPH_C, PIO_DEFAULT}
-/** SSC pin RK */
-#define PIN_SSC_RK {PIO_PA22A_RK, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** SSC pin RF */
-#define PIN_SSC_RF {PIO_PD24B_RF, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-
-/** SSC pins definition for codec. */
-#define PINS_SSC_CODEC \
- {PIN_SSC_TD, PIN_SSC_TK, PIN_SSC_TF, PIN_SSC_RD, PIN_SSC_RK, PIN_SSC_RF}
-
-/** UART pins (UTXD0 and URXD0) definitions, PA9,10. */
-#define PINS_UART0 \
- {PIO_PA9A_URXD0 | PIO_PA10A_UTXD0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** UART pins (UTXD4 and URXD4) definitions, PD19,18. */
-#define PINS_UART4 \
- {PIO_PD18C_URXD4 | PIO_PD19C_UTXD4, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-
-/* LED pins definitions */
-#define LED_YELLOW0 0
-#define LED_YELLOW1 1
-
-/** LED #0 pin definition (YELLOW). */
-#define PIN_LED_0 {PIO_PA23, PIOA, ID_PIOA, PIO_OUTPUT_1, PIO_DEFAULT}
-/** LED #0 pin definition (YELLOW). */
-#define PIN_LED_1 {PIO_PC9, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** List of all LEDs definitions. */
-#define PINS_LEDS {PIN_LED_0, PIN_LED_1}
-
-/**
- * Push button #0 definition.
- * Attributes = pull-up + debounce + interrupt on rising edge.
- */
-#define PIN_PUSHBUTTON_0 \
- {PIO_PA9, PIOA, ID_PIOA, PIO_INPUT, PIO_PULLUP | PIO_DEBOUNCE | PIO_IT_FALL_EDGE}
-/**
- * Push button #1 definition.
- * Attributes = pull-up + debounce + interrupt on rising edge.
- */
-#define PIN_PUSHBUTTON_1 \
- {PIO_PB12, PIOB, ID_PIOB, PIO_INPUT, PIO_PULLUP | PIO_DEBOUNCE | PIO_IT_FALL_EDGE}
-
-/** List of all push button definitions. */
-#define PINS_PUSHBUTTONS {PIN_PUSHBUTTON_0, PIN_PUSHBUTTON_1}
-
-/** Push button #0 index. */
-#define PUSHBUTTON_BP0 0
-/** Push button #1 index. */
-#define PUSHBUTTON_BP1 1
-
-/** PWMC PWM0 pin definition: Output High. */
-#define PIN_PWMC_PWMH0 {PIO_PD20A_PWMH0, PIOD, ID_PIOD, PIO_PERIPH_A, PIO_DEFAULT}
-/** PWMC PWM1 pin definition: Output High. */
-#define PIN_PWMC_PWMH1 {PIO_PD21A_PWMH1, PIOD, ID_PIOD, PIO_PERIPH_A, PIO_DEFAULT}
-/** PWM pins definition for LED0 */
-#define PIN_PWM_LED0 PIN_PWMC_PWMH0
-/** PWM pins definition for LED1 */
-#define PIN_PWM_LED1 PIN_PWMC_PWMH1
-/** PWM channel for LED0 */
-#define CHANNEL_PWM_LED0 0
-/** PWM channel for LED1 */
-#define CHANNEL_PWM_LED1 1
-
-/** SPI MISO pin definition. */
-#define PIN_SPI_MISO {PIO_PD20B_SPI0_MISO, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI MOSI pin definition. */
-#define PIN_SPI_MOSI {PIO_PD21B_SPI0_MOSI, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI SPCK pin definition. */
-#define PIN_SPI_SPCK {PIO_PD22B_SPI0_SPCK, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI chip select pin definition. */
-#define PIN_SPI_NPCS0 {PIO_PB2D_SPI0_NPCS0, PIOB, ID_PIOB, PIO_PERIPH_D, PIO_DEFAULT}
-#define PIN_SPI_NPCS1 {PIO_PD25B_SPI0_NPCS1, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-#define PIN_SPI_NPCS3 {PIO_PD27B_SPI0_NPCS3, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-
-/** List of SPI pin definitions (MISO, MOSI & SPCK). */
-#define PINS_SPI PIN_SPI_MISO, PIN_SPI_MOSI, PIN_SPI_SPCK
-
-/** PCK0 */
-#define PIN_PCK0 {PIO_PB13B_PCK0, PIOB, ID_PIOB, PIO_PERIPH_B, PIO_DEFAULT}
-/** PCK1 */
-#define PIN_PCK1 {PIO_PA17B_PCK1, PIOB, ID_PIOB, PIO_PERIPH_B, PIO_DEFAULT}
-/** PCK2 */
-#define PIN_PCK2 {PIO_PA18B_PCK2, PIOA, ID_PIOA, PIO_PERIPH_B, PIO_DEFAULT}
-
-
-/** PIO PARALLEL CAPTURE */
-/** Parallel Capture Mode Data Enable1 */
-#define PIN_PIODCEN1 PIO_PA15
-/** Parallel Capture Mode Data Enable2 */
-#define PIN_PIODCEN2 PIO_PA16
-
-/** TWI version 3.xx */
-#define TWI_V3XX
-/** TWI0 data pin */
-#define PIN_TWI_TWD0 {PIO_PA3A_TWD0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** TWI0 clock pin */
-#define PIN_TWI_TWCK0 {PIO_PA4A_TWCK0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** TWI0 pins */
-#define PINS_TWI0 {PIN_TWI_TWD0, PIN_TWI_TWCK0}
-
-/** TWI1 data pin */
-#define PIN_TWI_TWD1 {PIO_PB4A_TWD1, PIOB, ID_PIOB, PIO_PERIPH_A, PIO_DEFAULT}
-/** TWI1 clock pin */
-#define PIN_TWI_TWCK1 {PIO_PB5A_TWCK1, PIOB, ID_PIOB, PIO_PERIPH_A,PIO_DEFAULT}
-/** TWI1 pins */
-#define PINS_TWI1 {PIN_TWI_TWD1, PIN_TWI_TWCK1}
-
-/** USART0 pin RX */
-#define PIN_USART0_RXD {PIO_PB0C_RXD0, PIOB, ID_PIOB, PIO_PERIPH_C, PIO_DEFAULT}
-/** USART0 pin TX */
-#define PIN_USART0_TXD {PIO_PB1C_TXD0, PIOB, ID_PIOB, PIO_PERIPH_C, PIO_DEFAULT}
-/** USART0 pin CTS */
-#define PIN_USART0_CTS {PIO_PB2C_CTS0, PIOB, ID_PIOB, PIO_PERIPH_C, PIO_DEFAULT}
-/** USART0 pin RTS */
-#define PIN_USART0_RTS {PIO_PB3C_RTS0, PIOB, ID_PIOB, PIO_PERIPH_C, PIO_DEFAULT}
-/** USART0 pin SCK */
-#define PIN_USART0_SCK {PIO_PB13C_SCK0, PIOB, ID_PIOB, PIO_PERIPH_C,PIO_DEFAULT}
-
-/** USART1 pin RX */
-#define PIN_USART1_RXD {PIO_PA21A_RXD1, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** USART1 pin TX */
-#define PIN_USART1_TXD {PIO_PB4D_TXD1, PIOB, ID_PIOB, PIO_PERIPH_D, PIO_DEFAULT}
-/** USART1 pin CTS */
-#define PIN_USART1_CTS {PIO_PA25A_CTS1, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** USART1 pin RTS */
-#define PIN_USART1_RTS {PIO_PA24A_RTS1, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** USART1 pin ENABLE */
-#define PIN_USART1_EN {PIO_PA23A_SCK1, PIOA, ID_PIOA, PIO_OUTPUT_0, PIO_DEFAULT}
-/** USART1 pin SCK */
-#define PIN_USART1_SCK {PIO_PA23A_SCK1, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-
-/** USART2 pin RX */
-#define PIN_USART2_RXD {PIO_PD15B_RXD2, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** USART2 pin TX */
-#define PIN_USART2_TXD {PIO_PD16B_TXD2, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** USART2 pin CTS */
-#define PIN_USART2_CTS {PIO_PD19B_CTS2, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** USART2 pin RTS */
-#define PIN_USART2_RTS {PIO_PD18B_RTS2, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** USART2 pin SCK */
-#define PIN_USART2_SCK {PIO_PD17B_SCK2, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-
-/*Pins for USART0 as 7816 mode*/
-/** PIN used for reset the smartcard */
-#define PIN_ISO7816_RSTMC {PIO_PB2C_CTS0, PIOB, ID_PIOB, PIO_OUTPUT_0, PIO_DEFAULT}
-/** Pins used for connect the smartcard */
-#define PINS_ISO7816 PIN_USART0_TXD, PIN_USART0_SCK,PIN_ISO7816_RSTMC
-
-/** MCAN0 pin Transmit Data (TXD) */
-#define PIN_MCAN0_TXD {PIO_PB2A_CANTX0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-/** MCAN0 pin Receive Data (RXD) */
-#define PIN_MCAN0_RXD {PIO_PB3A_CANRX0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-
-/** MCAN1 pin Transmit Data (TXD) */
-#define PIN_MCAN1_TXD {PIO_PC14C_CANTX1, PIOC, ID_PIOC, PIO_PERIPH_C, PIO_DEFAULT}
-/** MCAN1 pin Receive Data (RXD) */
-#define PIN_MCAN1_RXD {PIO_PC12C_CANRX1, PIOC, ID_PIOC, PIO_PERIPH_C, PIO_DEFAULT}
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_gmac "SAM V71 Xplained Ultra - GMAC"
- * \section GMAC
- * - \ref BOARD_GMAC_PHY_ADDR
- * - \ref BOARD_GMAC_PHY_COMP_KSZ8061RNB
- * - \ref BOARD_GMAC_MODE_RMII
- * - \ref BOARD_GMAC_PINS
- * - \ref BOARD_GMAC_RESET_PIN
- *
- */
-/** PHY address */
-#define BOARD_GMAC_PHY_ADDR 1
-/** PHY Component */
-#define BOARD_GMAC_PHY_COMP_KSZ8061RNB 1
-/** Board GMAC power control - ALWAYS ON */
-#define BOARD_GMAC_POWER_ALWAYS_ON
-/** Board GMAC work mode - RMII/MII ( 1 / 0 ) */
-#define BOARD_GMAC_MODE_RMII 1
-
-/** The PIN list of PIO for GMAC */
-#define BOARD_GMAC_PINS \
- { (PIO_PD0A_GTXCK | PIO_PD1A_GTXEN | PIO_PD2A_GTX0 | PIO_PD3A_GTX1 \
- | PIO_PD4A_GRXDV | PIO_PD5A_GRX0 | PIO_PD6A_GRX1 | PIO_PD7A_GRXER \
- | PIO_PD8A_GMDC | PIO_PD9A_GMDIO ),PIOD, ID_PIOD, PIO_PERIPH_A, PIO_DEFAULT}, \
- {PIO_PC30, PIOC, ID_PIOC, PIO_INPUT, PIO_PULLUP},\
- {PIO_PA29, PIOA, ID_PIOA, PIO_INPUT, PIO_DEFAULT}
-
-/** The PIN list of PIO for GMAC */
-#define BOARD_GMAC_RESET_PIN {PIO_PC10, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_PULLUP}
-
-/** The runtime pin configure list for GMAC */
-#define BOARD_GMAC_RUN_PINS BOARD_GMAC_PINS
-
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_isi "SAM V71 Xplained Ultra - ISI"
- * This page lists all the IO definitions connected to ISI module.
- * ISI
- * - \ref PIN_ISI_D0
- * - \ref PIN_ISI_D1
- * - \ref PIN_ISI_D2
- * - \ref PIN_ISI_D3
- * - \ref PIN_ISI_D4
- * - \ref PIN_ISI_D5
- * - \ref PIN_ISI_D6
- * - \ref PIN_ISI_D7
- * - \ref PIN_ISI_D8
- * - \ref PIN_ISI_D9
- * - \ref BOARD_ISI_VSYNC
- * - \ref BOARD_ISI_HSYNC
- * - \ref BOARD_ISI_PCK
- * - \ref BOARD_ISI_PINS
- *
- */
-#define PIN_ISI_D0 {PIO_PD22D_ISI_D0, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D1 {PIO_PD21D_ISI_D1, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D2 {PIO_PB3D_ISI_D2, PIOB, ID_PIOB, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D3 {PIO_PA9B_ISI_D3, PIOA, ID_PIOA, PIO_PERIPH_B, PIO_PULLUP}
-#define PIN_ISI_D4 {PIO_PA5B_ISI_D4, PIOA, ID_PIOA, PIO_PERIPH_B, PIO_PULLUP}
-#define PIN_ISI_D5 {PIO_PD11D_ISI_D5, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D6 {PIO_PD12D_ISI_D6, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D7 {PIO_PA27D_ISI_D7, PIOA, ID_PIOA, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D8 {PIO_PD27D_ISI_D8, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-#define PIN_ISI_D9 {PIO_PD28D_ISI_D9, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_PULLUP}
-
-#define BOARD_ISI_VSYNC {PIO_PD25D_ISI_VSYNC, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_DEFAULT}
-#define BOARD_ISI_HSYNC {PIO_PD24D_ISI_HSYNC, PIOD, ID_PIOD, PIO_PERIPH_D, PIO_DEFAULT}
-#define BOARD_ISI_PCK {PIO_PA24D_ISI_PCK, PIOA, ID_PIOA, PIO_PERIPH_D, PIO_DEFAULT}
-
-#define BOARD_ISI_PCK0 { PIO_PA6B_PCK0, PIOA, ID_PIOA, PIO_PERIPH_B, PIO_DEFAULT }
-#define BOARD_ISI_RST { 1 << 13, PIOB, ID_PIOB, PIO_OUTPUT_1, PIO_DEFAULT }
-#define BOARD_ISI_PWD { 1 << 19, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT }
-
-#define BOARD_ISI_PINS \
- PIN_ISI_D0, PIN_ISI_D1, PIN_ISI_D2,PIN_ISI_D3,PIN_ISI_D4, PIN_ISI_D5,\
- PIN_ISI_D6,PIN_ISI_D7,PIN_ISI_D8, PIN_ISI_D9,BOARD_ISI_VSYNC ,\
- BOARD_ISI_HSYNC ,BOARD_ISI_PCK, BOARD_ISI_RST, BOARD_ISI_PWD,BOARD_ISI_PCK0
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_usb "SAM V71 Xplained Ultra - USB device"
- *
- * \section Definitions
- * - \ref BOARD_USB_BMATTRIBUTES
- *
- * \section vBus
- * - \ref PIN_USB_VBUS
- *
- */
-
-/**
- * USB attributes configuration descriptor (bus or self powered,
- * remote wakeup)
- */
-#define BOARD_USB_BMATTRIBUTES USBConfigurationDescriptor_SELFPOWERED_NORWAKEUP
-
-/** USB VBus monitoring pin definition. */
-#define PIN_USB_VBUS {PIO_PC16, PIOC, ID_PIOC, PIO_INPUT, PIO_DEFAULT}
-
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_extcomp "SAM V71 Xplained Ultra - External components"
- * This page lists the definitions related to external on-board components
- * located in the board.h file for the SAM V71 Xplained Ultra board.
- *
- * LCD
- */
-/** Indicates board has an ILI9325 external component to manage LCD. */
-#define BOARD_LCD_ILI9488
-//#define BOARD_LCD_SPI_EXT1
-#define BOARD_LCD_SPI_EXT2
-
-/** SPI pin definition for LCD */
-#if defined (BOARD_LCD_SPI_EXT1)
-/** SPI MISO pin definition. */
-#define LCD_SPI_MISO {PIO_PD20B_SPI0_MISO, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI MOSI pin definition. */
-#define LCD_SPI_MOSI {PIO_PD21B_SPI0_MOSI, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI SPCK pin definition. */
-#define LCD_SPI_SPCK {PIO_PD22B_SPI0_SPCK, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI chip select pin definition. */
-#define LCD_SPI_NPCS {PIO_PD27B_SPI0_NPCS3, PIOD, ID_PIOD, PIO_PERIPH_B,PIO_DEFAULT}
-
-/** SPI chip select pin definition. */
-#define LCD_SPI_NPCS {PIO_PD25B_SPI0_NPCS1, PIOD, ID_PIOD, PIO_PERIPH_B,PIO_DEFAULT}
-
-/** LCD pins definition. */
-#define BOARD_SPI_LCD_PINS {LCD_SPI_MISO, LCD_SPI_MOSI, LCD_SPI_SPCK, LCD_SPI_NPCS}
-
-/** Back-light pin definition. */
-
-#define BOARD_SPI_LCD_BACKLIGHT_PIN \
- {PIO_PA0A_PWMC0_PWMH0, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-
-/** PWMC PWM0 pin definition: Output Low. */
-#define LCD_SPI_PIN_RESET {PIO_PD28, PIOD, ID_PIOD, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** PWM channel for LED0 */
-#define CHANNEL_PWM_LCD 0
-
-#endif
-/*ENDIF BOARD_LCD_SPI_EXT1 */
-
-#if defined (BOARD_LCD_SPI_EXT2)
- /** SPI MISO pin definition. */
-#define LCD_SPI_MISO {PIO_PD20B_SPI0_MISO, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI MOSI pin definition. */
-#define LCD_SPI_MOSI {PIO_PD21B_SPI0_MOSI, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI SPCK pin definition. */
-#define LCD_SPI_SPCK {PIO_PD22B_SPI0_SPCK, PIOD, ID_PIOD, PIO_PERIPH_B, PIO_DEFAULT}
-/** SPI chip select pin definition. */
-#define LCD_SPI_NPCS {PIO_PD27B_SPI0_NPCS3, PIOD, ID_PIOD, PIO_PERIPH_B,PIO_DEFAULT}
-
-/** LCD pins definition. */
-#define BOARD_SPI_LCD_PINS {LCD_SPI_MISO, LCD_SPI_MOSI, LCD_SPI_SPCK, LCD_SPI_NPCS}
-
-/** Back-light pin definition. */
-
-#define BOARD_SPI_LCD_PIN_BACKLIGHT \
- {PIO_PC19B_PWMC0_PWMH2, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** PWMC PWM0 pin definition: Output Low. */
-#define LCD_SPI_PIN_RESET {PIO_PA24, PIOA, ID_PIOA, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** LCD command/data select pin */
-#define BOARD_SPI_LCD_PIN_CDS {PIO_PA6, PIOA, ID_PIOA, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** PWM channel for LED0 */
-#define CHANNEL_PWM_LCD 2
-
-#endif
-/*ENDIF BOARD_LCD_SPI_EXT2 */
-
-/** SMC pin definition for LCD */
-/** LCD data pin */
-#define PIN_EBI_LCD_DATAL {0xFF, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_PULLUP}
-#define PIN_EBI_LCD_DATAH_0 {0x3F, PIOE, ID_PIOE, PIO_PERIPH_A, PIO_PULLUP}
-#define PIN_EBI_LCD_DATAH_1 {PIO_PA15A_D14|PIO_PA16A_D15, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_PULLUP}
-/** LCD WE pin */
-#define PIN_EBI_LCD_NWE {PIO_PC8A_NWE, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_PULLUP}
-/** LCD RD pin */
-#define PIN_EBI_LCD_NRD {PIO_PC11A_NRD, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_PULLUP}
-/* LCD CS pin (NCS3) */
-#define PIN_EBI_LCD_CS {PIO_PD19A_NCS3, PIOD, ID_PIOD, PIO_PERIPH_A, PIO_PULLUP}
-/** LCD command/data select pin */
-#define BOARD_EBI_LCD_PIN_CDS {PIO_PC30, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT}
-/** Back-light pin definition. */
-#define BOARD_EBI_LCD_PIN_BACKLIGHT {PIO_PC9B_TIOB7, PIOC, ID_PIOC, PIO_PERIPH_B, PIO_DEFAULT}
-/** LCD reset pin */
-#define LCD_EBI_PIN_RESET {PIO_PC13, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT}
-
-/** LCD pins definition. */
-#define BOARD_EBI_LCD_PINS \
- {PIN_EBI_LCD_DATAL, PIN_EBI_LCD_DATAH_0, PIN_EBI_LCD_DATAH_1, \
- PIN_EBI_LCD_NWE,PIN_EBI_LCD_NRD,PIN_EBI_LCD_CS}
-
-
-/** Display width in pixels. */
-#define BOARD_LCD_WIDTH 320
-/** Display height in pixels. */
-#define BOARD_LCD_HEIGHT 480
-
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_mem "SAM V71 Xplained Ultra - Memories"
- * This page lists definitions related to internal & external on-board memories.
- * \section SDRAM
- * - \ref PIN_SDRAM_D0_7
- * - \ref PIN_SDRAM_D8_13
- * - \ref PIN_SDRAM_D14_15
- * - \ref PIN_SDRAM_A0_9
- * - \ref PIN_SDRAM_SDA10
- * - \ref PIN_SDRAM_CAS
- * - \ref PIN_SDRAM_RAS
- * - \ref PIN_SDRAM_SDCKE
- * - \ref PIN_SDRAM_SDCK
- * - \ref PIN_SDRAM_SDSC
- * - \ref PIN_SDRAM_NBS0
- * - \ref PIN_SDRAM_NBS1
- * - \ref PIN_SDRAM_SDWE
- * - \ref PIN_SDRAM_BA0
- *
- * \section SDMMC
- * - \ref BOARD_MCI_PIN_CD
- * - \ref BOARD_MCI_PIN_CK
- * - \ref BOARD_MCI_PINS_SLOTA
- * - \ref BOARD_SD_PINS
- *
- * \section QSPI
- * - \ref PINS_QSPI_IO
- * - \ref PINS_QSPI_IO3
- * - \ref PINS_QSPI
- */
-
-/** List of all SDRAM pin definitions. */
-#define BOARD_SDRAM_SIZE (2*1024*1024)
-#define PIN_SDRAM_D0_7 {0x000000FF, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_D8_13 {0x0000003F, PIOE, ID_PIOE, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_D14_15 {0x00018000, PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_A0_9 {0x3FF00000, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_SDA10 {0x00002000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-
-#define PIN_SDRAM_CAS {0x00020000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_RAS {0x00010000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_SDCKE {0x00004000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_SDCK {0x00800000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_SDSC {0x00008000, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_NBS0 {0x00040000, PIOC, ID_PIOC, PIO_PERIPH_A, PIO_DEFAULT}
-#define PIN_SDRAM_NBS1 {0x00008000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_SDWE {0x20000000, PIOD, ID_PIOD, PIO_PERIPH_C, PIO_DEFAULT}
-#define PIN_SDRAM_BA0 {0x00100000, PIOA, ID_PIOA, PIO_PERIPH_C, PIO_DEFAULT}
-
-#define BOARD_SDRAM_PINS PIN_SDRAM_D0_7, PIN_SDRAM_D8_13 , PIN_SDRAM_D14_15,\
- PIN_SDRAM_A0_9, PIN_SDRAM_SDA10, PIN_SDRAM_BA0, \
- PIN_SDRAM_CAS, PIN_SDRAM_RAS, PIN_SDRAM_SDCKE,PIN_SDRAM_SDCK,\
- PIN_SDRAM_SDSC,PIN_SDRAM_NBS0 ,PIN_SDRAM_NBS1,PIN_SDRAM_SDWE
-
-
-/** List of all MCI pin definitions. */
-
-/** MCI0 Card detect pin definition. (PE5) */
-#define BOARD_MCI_PIN_CD {PIO_PD18, PIOD, ID_PIOD, PIO_INPUT, PIO_PULLUP}
-/** MCI0 Clock . */
-#define BOARD_MCI_PIN_CK {PIO_PA25D_MCCK, PIOA, ID_PIOA, PIO_PERIPH_D, PIO_DEFAULT}
-
-/** MCI0 Solt A IO pins definition. (PC4-PC13) */
-#define BOARD_MCI_PINS_SLOTA \
- {(PIO_PA30C_MCDA0 | PIO_PA31C_MCDA1 | PIO_PA26C_MCDA2 | PIO_PA27C_MCDA3 | PIO_PA28C_MCCDA),\
- PIOA, ID_PIOA, PIO_PERIPH_C, PIO_DEFAULT}
-
-/** MCI pins that shall be configured to access the SD card. */
-#define BOARD_SD_PINS {BOARD_MCI_PINS_SLOTA, BOARD_MCI_PIN_CK}
-/** MCI Card Detect pin. */
-#define BOARD_SD_PIN_CD BOARD_MCI_PIN_CD
- /** Total number of MCI interface */
-#define BOARD_NUM_MCI 1
-
-/** List of all SQPI pin definitions. */
-#define PINS_QSPI_IO \
- {(PIO_PA11A_QCS | PIO_PA13A_QIO0 | PIO_PA12A_QIO1 | PIO_PA17A_QIO2 | PIO_PA14A_QSCK),\
- PIOA, ID_PIOA, PIO_PERIPH_A, PIO_DEFAULT}
-#define PINS_QSPI_IO3 {PIO_PD31A_QIO3, PIOD, ID_PIOD, PIO_PERIPH_A, PIO_DEFAULT}
-#define PINS_QSPI {PINS_QSPI_IO, PINS_QSPI_IO3}
-
-/*----------------------------------------------------------------------------*/
-/**
- * \page samv7_Xplained_ultra_chipdef "SAM V71 Xplained Ultra - Individual chip definition"
- * This page lists the definitions related to different chip's definition
- *
- * \section USART
- * - \ref BOARD_PIN_USART_RXD
- * - \ref BOARD_PIN_USART_TXD
- * - \ref BOARD_PIN_USART_CTS
- * - \ref BOARD_PIN_USART_RTS
- * - \ref BOARD_PIN_USART_EN
- * - \ref BOARD_USART_BASE
- * - \ref BOARD_ID_USART
- */
-
-/** Rtc */
-#define BOARD_RTC_ID ID_RTC
-
-/** TWI ID for QTouch application to use */
-#define BOARD_ID_TWI_AT42 ID_TWI0
-/** TWI Base for QTouch application to use */
-#define BOARD_BASE_TWI_AT42 TWI0
-/** TWI pins for QTouch application to use */
-#define BOARD_PINS_TWI_AT42 PINS_TWI0
-
-/** USART RX pin for application */
-#define BOARD_PIN_USART_RXD PIN_USART1_RXD
-/** USART TX pin for application */
-#define BOARD_PIN_USART_TXD PIN_USART1_TXD
-/** USART CTS pin for application */
-#define BOARD_PIN_USART_CTS PIN_USART1_CTS
-/** USART RTS pin for application */
-#define BOARD_PIN_USART_RTS PIN_USART1_RTS
-/** USART ENABLE pin for application */
-#define BOARD_PIN_USART_EN PIN_USART1_EN
-/** USART Base for application */
-#define BOARD_USART_BASE USART1
-/** USART ID for application */
-#define BOARD_ID_USART ID_USART1
-
-
-
-/*----------------------------------------------------------------------------*/
- /*
- * USB pins
- */
-#define PINS_VBUS_EN {PIO_PC16, PIOC, ID_PIOC, PIO_OUTPUT_1, PIO_DEFAULT}
-#endif /* #ifndef _BOARD_H_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/bmp.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/bmp.h
deleted file mode 100644
index 19659115..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/bmp.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- * \section Purpose
- *
- * Utility for BMP
- *
- */
-
-#ifndef BMP_H
-#define BMP_H
-
-/** BMP magic number ('BM'). */
-#define BMP_TYPE 0x4D42
-
-/** headerSize must be set to 40 */
-#define BITMAPINFOHEADER 40
-
-/*------------------------------------------------------------------------------
- * Exported types
- *------------------------------------------------------------------------------*/
-
-#pragma pack( 1 )
-
-/** BMP (Windows) Header Format */
-typedef struct _BMPHeader{
- /* signature, must be 4D42 hex */
- uint16_t type;
- /* size of BMP file in bytes (unreliable) */
- uint32_t fileSize;
- /* reserved, must be zero */
- uint16_t reserved1;
- /* reserved, must be zero */
- uint16_t reserved2;
- /* offset to start of image data in bytes */
- uint32_t offset;
- /* size of BITMAPINFOHEADER structure, must be 40 */
- uint32_t headerSize;
- /* image width in pixels */
- uint32_t width;
- /* image height in pixels */
- uint32_t height;
- /* number of planes in the image, must be 1 */
- uint16_t planes;
- /* number of bits per pixel (1, 4, 8, 16, 24, 32) */
- uint16_t bits;
- /* compression type (0=none, 1=RLE-8, 2=RLE-4) */
- uint32_t compression;
- /* size of image data in bytes (including padding) */
- uint32_t imageSize;
- /* horizontal resolution in pixels per meter (unreliable) */
- uint32_t xresolution;
- /* vertical resolution in pixels per meter (unreliable) */
- uint32_t yresolution;
- /* number of colors in image, or zero */
- uint32_t ncolours;
- /* number of important colors, or zero */
- uint32_t importantcolours;
- } BMPHeader;
-
-#pragma pack()
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *------------------------------------------------------------------------------*/
-extern uint8_t BMP_IsValid(void *file);
-extern uint32_t BMP_GetFileSize(void *file);
-
-extern uint8_t BMP_Decode(
- void *file,
- uint8_t *buffer,
- uint32_t width,
- uint32_t height,
- uint8_t bpp );
-
-extern void WriteBMPheader(
- uint32_t *pAddressHeader,
- uint32_t bmpHSize,
- uint32_t bmpVSize,
- uint8_t nbByte_Pixels );
-
-extern void BMP_displayHeader(uint32_t* pAddressHeader);
-extern void RGB565toBGR555(
- uint8_t *fileSource,
- uint8_t *fileDestination,
- uint32_t width,
- uint32_t height,
- uint8_t bpp );
-
-#endif //#ifndef BMP_H
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_lowlevel.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_lowlevel.h
deleted file mode 100644
index 836b3696..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_lowlevel.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for the low-level initialization function.
- *
- */
-
-#ifndef BOARD_LOWLEVEL_H
-#define BOARD_LOWLEVEL_H
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern void LowLevelInit( void );
-extern void _SetupMemoryRegion( void );
-
-#endif /* BOARD_LOWLEVEL_H */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_memories.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_memories.h
deleted file mode 100644
index 04260ab4..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/board_memories.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for memories configuration on board.
- *
- */
-
-#ifndef BOARD_MEMORIES_H
-#define BOARD_MEMORIES_H
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern void BOARD_ConfigureSdram( void );
-extern uint32_t BOARD_SdramValidation(uint32_t baseAddr, uint32_t size);
-
-#endif /* #ifndef BOARD_MEMORIES_H */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/cs2100.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/cs2100.h
deleted file mode 100644
index 6baf57ae..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/cs2100.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Implementation WM8904 driver.
- *
- */
-
-#ifndef CS2100_H
-#define CS2100_H
-
-#include "board.h"
-
-/*----------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-
-#define CS2100_SLAVE_ADDRESS 0x4E
-
-/** ID and Rev register*/
-#define CS2100_REG_ID 0x01
-
-/** VMID control 0 register*/
-#define CS2100_REG_CTRL 0x02
-
-/** MIC Bias control 0 register*/
-#define CS2100_REG_DEV_CFG1 0x03
-
-/** Bias control 1 register*/
-#define CS2100_REG_CFG 0x05
-
-/** Power management control 0 register*/
-#define CS2100_REG_32_BIT_RATIO_1 0x06
-/** Power management control 0 register*/
-#define CS2100_REG_32_BIT_RATIO_2 0x07
-/** Power management control 0 register*/
-#define CS2100_REG_32_BIT_RATIO_3 0x08
-/** Power management control 0 register*/
-#define CS2100_REG_32_BIT_RATIO_4 0x09
-/** Power management control 2 register*/
-#define CS2100_REG_FUNC_CFG1 0x16
-/** Power management control 3 register*/
-#define CS2100_REG_FUNC_CFG2 0x17
-/** Power management control 3 register*/
-#define CS2100_REG_FUNC_CFG3 0x1E
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern uint16_t CS2100_Read(
- Twid *pTwid,
- uint32_t device,
- uint32_t regAddr);
-
-extern void CS2100_Write(
- Twid *pTwid,
- uint32_t device,
- uint32_t regAddr,
- uint16_t data);
-
-extern uint8_t CS2100_Init(Twid *pTwid, uint32_t device, uint32_t PCK);
-#endif // CS2100_H
-
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/dbg_console.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/dbg_console.h
deleted file mode 100644
index af6e651b..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/dbg_console.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Include function prototype for the UART console.
- */
-
-#ifndef _DBG_CONSOLE_
-#define _DBG_CONSOLE_
-
-#include
-
-extern void DBG_Configure( uint32_t dwBaudrate, uint32_t dwMasterClock ) ;
-extern void DBG_PutChar( uint8_t uc ) ;
-extern uint32_t DBG_GetChar( void ) ;
-extern uint32_t DBG_IsRxReady( void ) ;
-
-
-extern void DBG_DumpFrame( uint8_t* pucFrame, uint32_t dwSize ) ;
-extern void DBG_DumpMemory( uint8_t* pucBuffer, uint32_t dwSize, uint32_t dwAddress ) ;
-extern uint32_t DBG_GetInteger( int32_t* pdwValue ) ;
-extern uint32_t DBG_GetIntegerMinMax( int32_t* pdwValue, int32_t dwMin, int32_t dwMax ) ;
-extern uint32_t DBG_GetHexa32( uint32_t* pdwValue ) ;
-
-#endif /* _DBG_CONSOLE_ */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/frame_buffer.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/frame_buffer.h
deleted file mode 100644
index 7afe0488..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/frame_buffer.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of frame buffer driver.
- *
- */
-
-#ifndef _FRAME_BUFFER_
-#define _FRAME_BUFFER_
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern void FB_SetFrameBuffer(
- LcdColor_t *pBuffer,
- uint8_t ucWidth,
- uint8_t ucHeight);
-
-extern void FB_SetColor(uint32_t color);
-
-extern uint32_t FB_DrawLine (
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2 );
-
-extern uint32_t FB_DrawPixel( uint32_t x, uint32_t y );
-extern uint32_t FB_DrawCircle( uint32_t x, uint32_t y, uint32_t r );
-extern uint32_t FB_DrawFilledCircle(
- uint32_t dwX,
- uint32_t dwY,
- uint32_t dwRadius);
-
-extern uint32_t FB_DrawRectangle(
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2 );
-
-extern uint32_t FB_DrawFilledRectangle(
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2 );
-
-extern uint32_t FB_DrawPicture(
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2,
- const void *pBuffer );
-
-#endif /* #ifndef _FRAME_BUFFER_ */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmacb_phy.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmacb_phy.h
deleted file mode 100644
index 568cc55a..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmacb_phy.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/** \file */
-
-/** \addtogroup gmacb_module Ethernet GMACB Driver
- *@{
- * Implement GEMAC PHY driver, that initialize the PHY to prepare for
- * Ethernet transfer.
- *
- * \section Usage
- * -# EMAC related pins and Driver should be initialized at first.
- * -# Initialize GMACB Driver instance by invoking GMACB_Init().
- * -# Initialize PHY connected via GMACB_InitPhy(), PHY address is
- * automatically adjusted by attempt to read.
- * -# Perform PHY auto negotiate through GMACB_AutoNegotiate(), so
- * connection established.
- *
- *
- * Related files:\n
- * \ref gmacb.h\n
- * \ref gmacb.c\n
- * \ref gmii.h.\n
- *
- */
-/**@}*/
-
-#ifndef _GMACB_PHY_H
-#define _GMACB_PHY_H
-
-
-/*---------------------------------------------------------------------------
- * Headers
- *---------------------------------------------------------------------------*/
-
-#include "board.h"
-
-/*---------------------------------------------------------------------------
- * Definitions
- *---------------------------------------------------------------------------*/
-
-/** The reset length setting for external reset configuration */
-#define GMACB_RESET_LENGTH 0xD
-
-/*---------------------------------------------------------------------------
- * Types
- *---------------------------------------------------------------------------*/
-
-
-/** The DM9161 instance */
-typedef struct _GMacb {
- /**< Driver */
- sGmacd *pGmacd;
- /** The retry & timeout settings */
- uint32_t retryMax;
- /** PHY address ( pre-defined by pins on reset ) */
- uint8_t phyAddress;
- } GMacb;
-
-/*---------------------------------------------------------------------------
- * Exported functions
- *---------------------------------------------------------------------------*/
-extern void GMACB_SetupTimeout(GMacb *pMacb, uint32_t toMax);
-
-extern void GMACB_Init(GMacb *pMacb, sGmacd *pGmacd, uint8_t phyAddress);
-
-extern uint8_t GMACB_InitPhy(
- GMacb *pMacb,
- uint32_t mck,
- const Pin *pResetPins,
- uint32_t nbResetPins,
- const Pin *pEmacPins,
- uint32_t nbEmacPins);
-
-extern uint8_t GMACB_AutoNegotiate(GMacb *pMacb);
-
-extern uint8_t GMACB_GetLinkSpeed(GMacb *pMacb, uint8_t applySettings);
-
-extern uint8_t GMACB_Send(GMacb *pMacb, void *pBuffer, uint32_t size);
-
-extern uint32_t GMACB_Poll(GMacb *pMacb, uint8_t *pBuffer, uint32_t size);
-
-extern void GMACB_DumpRegisters(GMacb *pMacb);
-
-extern uint8_t GMACB_ResetPhy(GMacb *pMacb);
-
-#endif // #ifndef _GMACB_H
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmii.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmii.h
deleted file mode 100644
index bb1a667d..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/gmii.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _GMII_DEFINE_H
-#define _GMII_DEFINE_H
-
-
-/*---------------------------------------------------------------------------
- * Definitions
- *---------------------------------------------------------------------------*/
-
-//IEEE defined Registers
-#define GMII_BMCR 0x0 // Basic Mode Control Register
-#define GMII_BMSR 0x1 // Basic Mode Status Register
-#define GMII_PHYID1R 0x2 // PHY Identifier Register 1
-#define GMII_PHYID2R 0x3 // PHY Identifier Register 2
-#define GMII_ANAR 0x4 // Auto_Negotiation Advertisement Register
-#define GMII_ANLPAR 0x5 // Auto_negotiation Link Partner Ability Register
-#define GMII_ANER 0x6 // Auto-negotiation Expansion Register
-#define GMII_ANNPR 0x7 // Auto-negotiation Next Page Register
-#define GMII_ANLPNPAR 0x8 // Auto_negotiation Link Partner Next Page Ability Register
-#define GMII_AFEC0R 0x11 // AFE Control 0 Register
-#define GMII_AFEC3R 0x14 // AFE Control 3 Register
-#define GMII_RXERCR 0x15 // RXER Counter Register
-#define GMII_OMSSR 0x17 // Operation Mode Strap Status Register
-#define GMII_ECR 0x18 // Expanded Control Register
-#define GMII_ICSR 0x1B // Interrupt Control/Status Register
-#define GMII_FC 0x1C // Function Control
-#define GMII_LCSR 0x1D // LinkMD® Control/Status Register
-#define GMII_PC1R 0x1E // PHY Control 1 Register
-#define GMII_PC2R 0x1F // PHY Control 2 Register
-
-// PHY ID Identifier Register
-#define GMII_LSB_MASK 0x0U
-// definitions: MII_PHYID1
-#define GMII_OUI_MSB 0x0022
-// definitions: MII_PHYID2
-#define GMII_OUI_LSB 0x1572 // KSZ8061 PHY Id2
-
-// Basic Mode Control Register (BMCR)
-// Bit definitions: MII_BMCR
-#define GMII_RESET (1 << 15) // 1= Software Reset; 0=Normal Operation
-#define GMII_LOOPBACK (1 << 14) // 1=loopback Enabled; 0=Normal Operation
-#define GMII_SPEED_SELECT_LSB (1 << 13) // 1,0=1000Mbps 0,1=100Mbps; 0,0=10Mbps
-#define GMII_AUTONEG (1 << 12) // Auto-negotiation Enable
-#define GMII_POWER_DOWN (1 << 11) // 1=Power down 0=Normal operation
-#define GMII_ISOLATE (1 << 10) // 1 = Isolates 0 = Normal operation
-#define GMII_RESTART_AUTONEG (1 << 9) // 1 = Restart auto-negotiation 0 = Normal operation
-#define GMII_DUPLEX_MODE (1 << 8) // 1 = Full duplex operation 0 = Normal operation
-// Reserved 7 // Read as 0, ignore on write
-#define GMII_SPEED_SELECT_MSB (1 << 6) //
-// Reserved 5 to 0 // Read as 0, ignore on write
-
-
-// Basic Mode Status Register (BMSR)
-// Bit definitions: MII_BMSR
-#define GMII_100BASE_T4 (1 << 15) // 100BASE-T4 Capable
-#define GMII_100BASE_TX_FD (1 << 14) // 100BASE-TX Full Duplex Capable
-#define GMII_100BASE_T4_HD (1 << 13) // 100BASE-TX Half Duplex Capable
-#define GMII_10BASE_T_FD (1 << 12) // 10BASE-T Full Duplex Capable
-#define GMII_10BASE_T_HD (1 << 11) // 10BASE-T Half Duplex Capable
-// Reserved 10 to 9 // Read as 0, ignore on write
-#define GMII_EXTEND_STATUS (1 << 8) // 1 = Extend Status Information In Reg 15
-// Reserved 7
-#define GMII_MF_PREAMB_SUPPR (1 << 6) // MII Frame Preamble Suppression
-#define GMII_AUTONEG_COMP (1 << 5) // Auto-negotiation Complete
-#define GMII_REMOTE_FAULT (1 << 4) // Remote Fault
-#define GMII_AUTONEG_ABILITY (1 << 3) // Auto Configuration Ability
-#define GMII_LINK_STATUS (1 << 2) // Link Status
-#define GMII_JABBER_DETECT (1 << 1) // Jabber Detect
-#define GMII_EXTEND_CAPAB (1 << 0) // Extended Capability
-
-// Auto-negotiation Advertisement Register (ANAR)
-// Auto-negotiation Link Partner Ability Register (ANLPAR)
-// Bit definitions: MII_ANAR, MII_ANLPAR
-#define GMII_NP (1 << 15) // Next page Indication
-// Reserved 7
-#define GMII_RF (1 << 13) // Remote Fault
-// Reserved 12 // Write as 0, ignore on read
-#define GMII_PAUSE_MASK (3 << 11) // 0,0 = No Pause 1,0 = Asymmetric Pause(link partner)
- // 0,1 = Symmetric Pause 1,1 = Symmetric&Asymmetric Pause(local device)
-#define GMII_T4 (1 << 9) // 100BASE-T4 Support
-#define GMII_TX_FDX (1 << 8) // 100BASE-TX Full Duplex Support
-#define GMII_TX_HDX (1 << 7) // 100BASE-TX Support
-#define GMII_10_FDX (1 << 6) // 10BASE-T Full Duplex Support
-#define GMII_10_HDX (1 << 5) // 10BASE-T Support
-// Selector 4 to 0 // Protocol Selection Bits
-#define GMII_AN_IEEE_802_3 0x00001
-
-#endif // #ifndef _MII_DEFINE_H
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488.h
deleted file mode 100644
index 581c5129..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 driver.
- *
- */
-
-#ifndef _ILI9488_H_
-#define _ILI9488_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-
-#include
-
-
-/*----------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-
-#define ILI9488_SPIMODE 0
-#define ILI9488_EBIMODE 1
-
-/* ILI9325 ID code */
-#define ILI9488_DEVICE_CODE 0x9488
-
-#define ILI9488_LCD_WIDTH 320
-#define ILI9488_LCD_HEIGHT 480
-#define ILI9488_SELF_TEST_OK 0xC0
-
-/* EBI chip select for LCD */
-#define SMC_EBI_LCD_CS 3
-
-/*----------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-typedef enum{
- AccessInst = 0,
- AccessRead,
- AccessWrite
-}AccessIli_t;
-
-typedef union _union_type
-{
- uint32_t value;
- struct{
- uint8_t byte_8;
- uint8_t byte_l6;
- uint8_t byte_24;
- uint8_t byte_32;
- }byte;
- struct{
- uint16_t half_word_l;
- uint16_t half_word_h;
- }half_word;
- }union_type;
-typedef volatile uint8_t REG8;
-
-typedef uint32_t LcdColor_t;
-
-/*----------------------------------------------------------------------------
- * Marcos
- *----------------------------------------------------------------------------*/
-/* Pixel cache used to speed up communication */
-#define LCD_DATA_CACHE_SIZE BOARD_LCD_WIDTH
-
-/*----------------------------------------------------------------------------
- * Function Marcos
- *----------------------------------------------------------------------------*/
-#define get_0b_to_8b(x) (((union_type*)&(x))->byte.byte_8)
-#define get_8b_to_16b(x) (((union_type*)&(x))->byte.byte_l6)
-#define get_16b_to_24b(x) (((union_type*)&(x))->byte.byte_24)
-#define get_24b_to_32b(x) (((union_type*)&(x))->byte.byte_32)
-
-#endif /* #ifndef ILI9488 */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_dma.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_dma.h
deleted file mode 100644
index 5c91539a..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_dma.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 driver.
- *
- */
-
-#ifndef _ILI9488_DMA_H_
-#define _ILI9488_DMA_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-#include
-
-/*------------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-/** An unspecified error has occurred.*/
-#define ILI9488_ERROR_DMA_ALLOCATE_CHANNEL 1
-#define ILI9488_ERROR_DMA_CONFIGURE 2
-#define ILI9488_ERROR_DMA_TRANSFER 3
-#define ILI9488_ERROR_DMA_SIZE 4
-
-#define ILI9488_SPI SPI0
-#define ILI9488_SPI_ID ID_SPI0
-
-/* EBI BASE ADDRESS for SMC LCD */
-#define ILI9488_BASE_ADDRESS 0x63000000
-
-/*------------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-
-typedef struct _ILI9488_dma
-{
- /** Pointer to DMA driver */
- sXdmad *xdmaD;
- /** ili9488 Tx channel */
- uint32_t ili9488DmaTxChannel;
- /** ili9488 Rx channel */
- uint32_t ili9488DmaRxChannel;
- /** ili9488 Tx/Rx configure descriptor */
- sXdmadCfg xdmadRxCfg,xdmadTxCfg;
- /** ili9488 dma interrupt */
- uint32_t xdmaInt;
- /** Pointer to SPI Hardware registers */
- Spi* pSpiHw ;
- /** SPI Id as defined in the product datasheet */
- uint8_t spiId ;
-}sIli9488Dma;
-
-typedef struct _ILI9488_ctl
-{
- /** ili9488 Command/Data mode */
- volatile uint32_t cmdOrDataFlag;
- /** ili9488 Rx done */
- volatile uint32_t rxDoneFlag;
- /** ili9488 Tx done */
- volatile uint32_t txDoneFlag;
-}sIli9488DmaCtl;
-
-#endif /* #ifndef ILI9488_DMA */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi.h
deleted file mode 100644
index 6539751e..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 driver.
- *
- */
-
-#ifndef _ILI9488_EBI_H_
-#define _ILI9488_EBI_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-
-#include
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern uint32_t ILI9488_EbiReadChipId (void);
-extern uint32_t ILI9488_EbiInitialize( sXdmad * dmad );
-extern void ILI9488_EbiSetPixelFormat(uint16_t format);
-extern void ILI9488_EbiSetCursor(uint16_t x, uint16_t y);
-extern void ILI9488_EbiSetWindow(
- uint16_t dwX, uint16_t dwY, uint16_t dwWidth, uint16_t dwHeight );
-extern void ILI9488_EbiSetFullWindow(void);
-extern void ILI9488_EbiOn(void );
-extern void ILI9488_EbiOff(void );
-extern void ILI9488_EbiSetDisplayLandscape( uint8_t dwRGB, uint8_t LandscaprMode );
-
-#endif /* #ifndef ILI9488_EBI */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi_dma.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi_dma.h
deleted file mode 100644
index efe45e8c..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_ebi_dma.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 driver.
- *
- */
-
-#ifndef _ILI9488_EBI_DMA_H_
-#define _ILI9488_EBI_DMA_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-#include
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern uint8_t ILI9488_EbiInitializeWithDma(sXdmad * dmad);
-extern uint8_t ILI9488_EbiDmaTxTransfer( uint16_t *pTxBuffer, uint32_t wTxSize);
-extern uint8_t ILI9488_EbiDmaRxTransfer( uint32_t *pRxBuffer,uint32_t wRxSize);
-extern uint8_t ILI9488_EbiSendCommand(uint16_t Instr, uint16_t *pTxData,
- uint32_t *pRxData, AccessIli_t ReadWrite, uint32_t size);
-#endif /* #ifndef ILI9488_EBI_DMA */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_reg.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_reg.h
deleted file mode 100644
index 523ed4b0..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_reg.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef ILI9488_REG_H_INCLUDED
-#define ILI9488_REG_H_INCLUDED
-
-/* Level 1 Commands (from the display Datasheet) */
-#define ILI9488_CMD_NOP 0x00
-#define ILI9488_CMD_SOFTWARE_RESET 0x01
-#define ILI9488_CMD_READ_DISP_ID 0x04
-#define ILI9488_CMD_READ_ERROR_DSI 0x05
-#define ILI9488_CMD_READ_DISP_STATUS 0x09
-#define ILI9488_CMD_READ_DISP_POWER_MODE 0x0A
-#define ILI9488_CMD_READ_DISP_MADCTRL 0x0B
-#define ILI9488_CMD_READ_DISP_PIXEL_FORMAT 0x0C
-#define ILI9488_CMD_READ_DISP_IMAGE_MODE 0x0D
-#define ILI9488_CMD_READ_DISP_SIGNAL_MODE 0x0E
-#define ILI9488_CMD_READ_DISP_SELF_DIAGNOSTIC 0x0F
-#define ILI9488_CMD_ENTER_SLEEP_MODE 0x10
-#define ILI9488_CMD_SLEEP_OUT 0x11
-#define ILI9488_CMD_PARTIAL_MODE_ON 0x12
-#define ILI9488_CMD_NORMAL_DISP_MODE_ON 0x13
-#define ILI9488_CMD_DISP_INVERSION_OFF 0x20
-#define ILI9488_CMD_DISP_INVERSION_ON 0x21
-#define ILI9488_CMD_PIXEL_OFF 0x22
-#define ILI9488_CMD_PIXEL_ON 0x23
-#define ILI9488_CMD_DISPLAY_OFF 0x28
-#define ILI9488_CMD_DISPLAY_ON 0x29
-#define ILI9488_CMD_COLUMN_ADDRESS_SET 0x2A
-#define ILI9488_CMD_PAGE_ADDRESS_SET 0x2B
-#define ILI9488_CMD_MEMORY_WRITE 0x2C
-#define ILI9488_CMD_MEMORY_READ 0x2E
-#define ILI9488_CMD_PARTIAL_AREA 0x30
-#define ILI9488_CMD_VERT_SCROLL_DEFINITION 0x33
-#define ILI9488_CMD_TEARING_EFFECT_LINE_OFF 0x34
-#define ILI9488_CMD_TEARING_EFFECT_LINE_ON 0x35
-#define ILI9488_CMD_MEMORY_ACCESS_CONTROL 0x36
-#define ILI9488_CMD_VERT_SCROLL_START_ADDRESS 0x37
-#define ILI9488_CMD_IDLE_MODE_OFF 0x38
-#define ILI9488_CMD_IDLE_MODE_ON 0x39
-#define ILI9488_CMD_COLMOD_PIXEL_FORMAT_SET 0x3A
-#define ILI9488_CMD_WRITE_MEMORY_CONTINUE 0x3C
-#define ILI9488_CMD_READ_MEMORY_CONTINUE 0x3E
-#define ILI9488_CMD_SET_TEAR_SCANLINE 0x44
-#define ILI9488_CMD_GET_SCANLINE 0x45
-#define ILI9488_CMD_WRITE_DISPLAY_BRIGHTNESS 0x51
-#define ILI9488_CMD_READ_DISPLAY_BRIGHTNESS 0x52
-#define ILI9488_CMD_WRITE_CTRL_DISPLAY 0x53
-#define ILI9488_CMD_READ_CTRL_DISPLAY 0x54
-#define ILI9488_CMD_WRITE_CONTENT_ADAPT_BRIGHTNESS 0x55
-#define ILI9488_CMD_READ_CONTENT_ADAPT_BRIGHTNESS 0x56
-#define ILI9488_CMD_WRITE_MIN_CAB_LEVEL 0x5E
-#define ILI9488_CMD_READ_MIN_CAB_LEVEL 0x5F
-#define ILI9488_CMD_READ_ABC_SELF_DIAG_RES 0x68
-#define ILI9488_CMD_READ_ID1 0xDA
-#define ILI9488_CMD_READ_ID2 0xDB
-#define ILI9488_CMD_READ_ID3 0xDC
-
-/* Level 2 Commands (from the display Datasheet) */
-#define ILI9488_CMD_INTERFACE_MODE_CONTROL 0xB0
-#define ILI9488_CMD_FRAME_RATE_CONTROL_NORMAL 0xB1
-#define ILI9488_CMD_FRAME_RATE_CONTROL_IDLE_8COLOR 0xB2
-#define ILI9488_CMD_FRAME_RATE_CONTROL_PARTIAL 0xB3
-#define ILI9488_CMD_DISPLAY_INVERSION_CONTROL 0xB4
-#define ILI9488_CMD_BLANKING_PORCH_CONTROL 0xB5
-#define ILI9488_CMD_DISPLAY_FUNCTION_CONTROL 0xB6
-#define ILI9488_CMD_ENTRY_MODE_SET 0xB7
-#define ILI9488_CMD_BACKLIGHT_CONTROL_1 0xB9
-#define ILI9488_CMD_BACKLIGHT_CONTROL_2 0xBA
-#define ILI9488_CMD_HS_LANES_CONTROL 0xBE
-#define ILI9488_CMD_POWER_CONTROL_1 0xC0
-#define ILI9488_CMD_POWER_CONTROL_2 0xC1
-#define ILI9488_CMD_POWER_CONTROL_NORMAL_3 0xC2
-#define ILI9488_CMD_POWER_CONTROL_IDEL_4 0xC3
-#define ILI9488_CMD_POWER_CONTROL_PARTIAL_5 0xC4
-#define ILI9488_CMD_VCOM_CONTROL_1 0xC5
-#define ILI9488_CMD_CABC_CONTROL_1 0xC6
-#define ILI9488_CMD_CABC_CONTROL_2 0xC8
-#define ILI9488_CMD_CABC_CONTROL_3 0xC9
-#define ILI9488_CMD_CABC_CONTROL_4 0xCA
-#define ILI9488_CMD_CABC_CONTROL_5 0xCB
-#define ILI9488_CMD_CABC_CONTROL_6 0xCC
-#define ILI9488_CMD_CABC_CONTROL_7 0xCD
-#define ILI9488_CMD_CABC_CONTROL_8 0xCE
-#define ILI9488_CMD_CABC_CONTROL_9 0xCF
-#define ILI9488_CMD_NVMEM_WRITE 0xD0
-#define ILI9488_CMD_NVMEM_PROTECTION_KEY 0xD1
-#define ILI9488_CMD_NVMEM_STATUS_READ 0xD2
-#define ILI9488_CMD_READ_ID4 0xD3
-#define ILI9488_CMD_ADJUST_CONTROL_1 0xD7
-#define ILI9488_CMD_READ_ID_VERSION 0xD8
-#define ILI9488_CMD_POSITIVE_GAMMA_CORRECTION 0xE0
-#define ILI9488_CMD_NEGATIVE_GAMMA_CORRECTION 0xE1
-#define ILI9488_CMD_DIGITAL_GAMMA_CONTROL_1 0xE2
-#define ILI9488_CMD_DIGITAL_GAMMA_CONTROL_2 0xE3
-#define ILI9488_CMD_SET_IMAGE_FUNCTION 0xE9
-#define ILI9488_CMD_ADJUST_CONTROL_2 0xF2
-#define ILI9488_CMD_ADJUST_CONTROL_3 0xF7
-#define ILI9488_CMD_ADJUST_CONTROL_4 0xF8
-#define ILI9488_CMD_ADJUST_CONTROL_5 0xF9
-#define ILI9488_CMD_SPI_READ_SETTINGS 0xFB
-#define ILI9488_CMD_ADJUST_CONTROL_6 0xFC
-#define ILI9488_CMD_ADJUST_CONTROL_7 0xFF
-
-#endif /* ILI9488_REGS_H_INCLUDED */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi.h
deleted file mode 100644
index 3f9ed815..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 driver.
- *
- */
-
-#ifndef _ILI9488_SPI_H_
-#define _ILI9488_SPI_H_
-
-/*------------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern uint32_t ILI9488_SpiReadChipId (void);
-extern uint32_t ILI9488_SpiInitialize( sXdmad * dmad );
-extern void ILI9488_SpiSetPixelFormat(uint8_t format);
-extern void ILI9488_SpiNop(void);
-extern void ILI9488_SpiWriteMemory(const uint8_t *pBuf, uint32_t size);
-extern void ILI9488_SpiReadMemory( const uint8_t *pBuf, uint32_t size);
-extern void ILI9488_SpiSetCursor(uint16_t x, uint16_t y);
-extern void ILI9488_SpiSetWindow(
- uint16_t dwX,
- uint16_t dwY,
- uint16_t dwWidth,
- uint16_t dwHeight );
-
-extern void ILI9488_SpiSetFullWindow(void);
-extern void ILI9488_SpiOn(void );
-extern void ILI9488_SpiOff(void );
-extern void ILI9488_SpiSetDisplayLandscape(
- uint8_t dwRGB, uint8_t LandscaprMode );
-extern void ILI9488_SetPixelColor(uint32_t x, uint32_t y, uint32_t color);
-
-#endif /* #ifndef ILI9488_SPI */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi_dma.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi_dma.h
deleted file mode 100644
index 9d37f78c..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/ili9488_spi_dma.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface of ILI9488 DMA driver.
- *
- */
-
-#ifndef _ILI9488_SPI_DMA_H_
-#define _ILI9488_SPI_DMA_H_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "board.h"
-#include
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern uint8_t ILI9488_SpiInitializeWithDma(sXdmad * dmad);
-extern uint8_t ILI9488_SpiDmaTxTransfer( uint8_t *pTxBuffer, uint32_t wTxSize);
-extern uint8_t ILI9488_SpiDmaRxTransfer( uint32_t *pRxBuffer,uint32_t wRxSize);
-extern uint8_t ILI9488_SpiSendCommand(uint8_t Instr, uint8_t* pTxData,
- uint32_t* pRxData, AccessIli_t ReadWrite, uint32_t size);
-
-#endif /* #ifndef ILI9488_SPI_DMA */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/image_sensor_inf.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/image_sensor_inf.h
deleted file mode 100644
index c84b5b93..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/image_sensor_inf.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2013, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-#include "board.h"
-
-
-/*---------------------------------------------------------------------------
- * Definition
- *---------------------------------------------------------------------------*/
-#define SENDOR_SUPPORTED_OUTPUTS 7
-
-/** terminating list entry for register in configuration file */
-#define SENSOR_REG_TERM 0xFF
-/** terminating list entry for value in configuration file */
-#define SENSOR_VAL_TERM 0xFF
-
-/*----------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-
-/** Sensor type */
-typedef enum _sensorType {
- SENSOR_COMS = 0,
- SENSOR_CCD
-}sensorType_t;
-
-/** Sensor status or return code */
-typedef enum _sensorStatus {
- SENSOR_OK = 0, /**< Operation is successful */
- SENSOR_TWI_ERROR,
- SENSOR_ID_ERROR,
- SENSOR_RESOLUTION_NOT_SUPPORTED
-} sendorStatus_t;
-
-/** Sensor TWI mode */
-typedef enum _sensorTwiMode {
- SENSOR_TWI_REG_BYTE_DATA_BYTE = 0,
- SENSOR_TWI_REG_2BYTE_DATA_BYTE,
- SENSOR_TWI_REG_BYTE_DATA_2BYTE
-} sensorTwiMode_t;
-
-/** Sensor resolution */
-typedef enum _sensorResolution {
- QVGA = 0,
- VGA,
- SVGA,
- XGA,
- WXGA,
- UVGA
-} sensorOutputResolution_t;
-
-/** Sensor output format */
-typedef enum _sensorOutputFormat {
- RAW_BAYER_12_BIT = 0,
- RAW_BAYER_10_BIT,
- YUV_422_8_BIT,
- YUV_422_10_BIT,
- MONO_12_BIT
-} sensorOutputFormat_t;
-
-/** define a structure for sensor register initialization values */
-typedef struct _sensor_reg {
- uint16_t reg; /* Register to be written */
- uint16_t val; /* value to be written */
-}sensorReg_t;
-
-typedef struct _sensor_output {
- uint8_t type ; /** Index 0: normal, 1: AF setting*/
- sensorOutputResolution_t output_resolution; /** sensor output resolution */
- sensorOutputFormat_t output_format; /** sensor output format */
- uint8_t supported; /** supported for current output_resolution*/
- uint32_t output_width; /** output width */
- uint32_t output_height; /** output height */
- const sensorReg_t *output_setting; /** sensor registers setting */
-}sensorOutput_t;
-
-/** define a structure for sensor profile */
-typedef struct _sensor_profile {
- sensorType_t cmos_ccd; /** Sensor type for CMOS sensor or CCD */
- sensorTwiMode_t twi_inf_mode; /** TWI interface mode */
- uint32_t twi_slave_addr; /** TWI slave address */
- uint16_t pid_high_reg; /** Register address for product ID high byte */
- uint16_t pid_low_reg; /** Register address for product ID low byte*/
- uint16_t pid_high; /** product ID high byte */
- uint16_t pid_low; /** product ID low byte */
- uint16_t version_mask; /** version mask */
- const sensorOutput_t *outputConf[SENDOR_SUPPORTED_OUTPUTS]; /** sensor settings */
-}sensorProfile_t;
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern sendorStatus_t sensor_twi_write_regs(Twid * pTwid,
- const sensorReg_t * pReglist);
-
-extern sendorStatus_t sensor_twi_read_regs(Twid * pTwid,
- const sensorReg_t * pReglist);
-
-extern sendorStatus_t sensor_setup(Twid * pTwid,
- const sensorProfile_t *sensor_profile,
- sensorOutputResolution_t resolution);
-
-extern sendorStatus_t sensor_get_output(sensorOutputFormat_t *format,
- uint32_t *width,
- uint32_t* height,
- sensorOutputResolution_t resolution);
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_color.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_color.h
deleted file mode 100644
index 5fc21d13..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_color.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef COLOR_H
-#define COLOR_H
-
-/**
- * \file
- *
- * RGB 24-bits color table definition.
- *
- */
-
-/*
- * RGB 24 Bpp
- * RGB 888
- * R7R6R5R4 R3R2R1R0 G7G6G5G4 G3G2G1G0 B7B6B5B4 B3B2B1B0
- */
-
-#define COLOR_BLACK 0x000000
-#define COLOR_WHITE 0xFFFFFF
-
-#define COLOR_BLUE 0x0000FF
-#define COLOR_GREEN 0x00FF00
-#define COLOR_RED 0xFF0000
-
-#define COLOR_NAVY 0x000080
-#define COLOR_DARKBLUE 0x00008B
-#define COLOR_DARKGREEN 0x006400
-#define COLOR_DARKCYAN 0x008B8B
-#define COLOR_CYAN 0x00FFFF
-#define COLOR_TURQUOISE 0x40E0D0
-#define COLOR_INDIGO 0x4B0082
-#define COLOR_DARKRED 0x800000
-#define COLOR_OLIVE 0x808000
-#define COLOR_GRAY 0x808080
-#define COLOR_SKYBLUE 0x87CEEB
-#define COLOR_BLUEVIOLET 0x8A2BE2
-#define COLOR_LIGHTGREEN 0x90EE90
-#define COLOR_DARKVIOLET 0x9400D3
-#define COLOR_YELLOWGREEN 0x9ACD32
-#define COLOR_BROWN 0xA52A2A
-#define COLOR_DARKGRAY 0xA9A9A9
-#define COLOR_SIENNA 0xA0522D
-#define COLOR_LIGHTBLUE 0xADD8E6
-#define COLOR_GREENYELLOW 0xADFF2F
-#define COLOR_SILVER 0xC0C0C0
-#define COLOR_LIGHTGREY 0xD3D3D3
-#define COLOR_LIGHTCYAN 0xE0FFFF
-#define COLOR_VIOLET 0xEE82EE
-#define COLOR_AZUR 0xF0FFFF
-#define COLOR_BEIGE 0xF5F5DC
-#define COLOR_MAGENTA 0xFF00FF
-#define COLOR_TOMATO 0xFF6347
-#define COLOR_GOLD 0xFFD700
-#define COLOR_ORANGE 0xFFA500
-#define COLOR_SNOW 0xFFFAFA
-#define COLOR_YELLOW 0xFFFF00
-
-#define BLACK 0x0000
-#define BLUE 0x001F
-#define RED 0xF800
-#define GREEN 0x07E0
-#define WHITE 0xFFFF
-
-/* level is in [0; 31]*/
-#define BLUE_LEV( level) ( (level)&BLUE )
-#define GREEN_LEV(level) ( (((level)*2)<<5)&GREEN )
-#define RED_LEV( level) ( ((level)<<(5+6))&RED )
-#define GRAY_LEV( level) ( BLUE_LEV(level) | GREEN_LEV(level) | RED_LEV(level))
-
-#define RGB_24_TO_RGB565(RGB) \
- (((RGB >>19)<<11) | (((RGB & 0x00FC00) >>5)) | (RGB & 0x00001F))
-#define RGB_24_TO_18BIT(RGB) \
- (((RGB >>16)&0xFC) | (((RGB & 0x00FF00) >>10) << 10) | (RGB & 0x0000FC)<<16)
-#define RGB_16_TO_18BIT(RGB) \
- (((((RGB >>11)*63)/31)<<18) | (RGB & 0x00FC00) | (((RGB & 0x00001F)*63)/31))
-#define BGR_TO_RGB_18BIT(RGB) \
- (RGB & 0xFF0000) | ((RGB & 0x00FF00) >> 8 ) | ( (RGB & 0x0000FC) >> 16 ))
-#define BGR_16_TO_18BITRGB(RGB) BGR_TO_RGB_18BIT(RGB_16_TO_18BIT(RGB))
-
-
-#endif /* #define COLOR_H */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_draw.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_draw.h
deleted file mode 100644
index 21f7fa91..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_draw.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
- /**
- * \file
- *
- * Interface for draw function on LCD.
- *
- */
-
-#ifndef DRAW_H
-#define DRAW_H
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-#include "board.h"
-#include
-#include "lcd_gimp_image.h"
-
-/*----------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-
-/** Horizontal direction line definition */
-#define DIRECTION_HLINE 0
-/** Vertical direction line definition */
-#define DIRECTION_VLINE 1
-
-typedef struct _rect{
- uint32_t x;
- uint32_t y;
- uint32_t width;
- uint32_t height;
-}rect;
-
-COMPILER_PACK_SET(1)
-typedef struct _rgb{
- uint8_t b;
- uint8_t g;
- uint8_t r;
-}sBGR;
-COMPILER_PACK_RESET()
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern void LCDD_SetUpdateWindowSize(rect rc);
-
-extern void LCDD_UpdateWindow(void);
-
-extern void LCDD_UpdatePartialWindow( uint8_t* pbuf, uint32_t size);
-
-extern void LCDD_DrawRectangleWithFill(
- uint16_t* pbuf,
- uint32_t dwX,
- uint32_t dwY,
- uint32_t dwWidth,
- uint32_t dwHeight,
- uint32_t dwColor);
-
-extern uint32_t LCDD_DrawCircle(
- uint16_t* pbuf,
- uint32_t x,
- uint32_t y,
- uint32_t r,
- uint32_t color);
-
-extern uint32_t LCD_DrawFilledCircle(
- uint16_t* pbuf,
- uint32_t dwX,
- uint32_t dwY,
- uint32_t dwRadius,
- uint32_t color);
-
-extern void LCDD_DrawString(
- uint16_t* pbuf,
- uint32_t x,
- uint32_t y,
- const uint8_t *pString,
- uint32_t color );
-
-extern void LCDD_GetStringSize(
- const uint8_t *pString,
- uint32_t *pWidth,
- uint32_t *pHeight );
-
-extern void LCDD_BitBlt(
- uint16_t* pbuf,
- uint32_t dst_x,
- uint32_t dst_y,
- uint32_t dst_w,
- uint32_t dst_h,
- const LcdColor_t *src,
- uint32_t src_x,
- uint32_t src_y,
- uint32_t src_w,
- uint32_t src_h);
-
-extern void LCDD_BitBltAlphaBlend(uint16_t* pbuf,
- uint32_t dst_x,
- uint32_t dst_y,
- uint32_t dst_w,
- uint32_t dst_h,
- const LcdColor_t *src,
- uint32_t src_x,
- uint32_t src_y,
- uint32_t src_w,
- uint32_t src_h,
- uint32_t alpha);
-extern void LCDD_DrawImage(
- uint16_t* pbuf,
- uint32_t dwX,
- uint32_t dwY,
- const LcdColor_t *pImage,
- uint32_t dwWidth,
- uint32_t dwHeight );
-
-extern void LCDD_DrawPixel(
- uint16_t* pbuf,
- uint32_t x,
- uint32_t y,
- uint32_t color );
-
-extern void LCDD_DrawLine(
- uint16_t* pbuf,
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2,
- uint32_t color);
-
-extern uint32_t LCDD_DrawLineBresenham(
- uint16_t* pbuf,
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2,
- uint32_t color);
-
-extern void LCDD_DrawRectangle(
- uint16_t* pbuf,
- uint32_t x,
- uint32_t y,
- uint32_t width,
- uint32_t height,
- uint32_t color);
-
-extern void LCDD_SetCavasBuffer(
- void* pBuffer,
- uint32_t wBufferSize);
-
-extern void LCDD_DrawStraightLine(
- uint16_t* pbuf,
- uint32_t dwX1,
- uint32_t dwY1,
- uint32_t dwX2,
- uint32_t dwY2 ,
- uint32_t color );
-#endif /* #ifndef DRAW_H */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font.h
deleted file mode 100644
index 4d2c4796..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for draw font on LCD.
- *
- */
-
-/**
- *
- * \section Purpose
- *
- * The font.h files declares a font structure and a LCDD_DrawChar function
- * that must be implemented by a font definition file to be used with the
- * LCDD_DrawString method of draw.h.
- *
- * The font10x14.c implements the necessary variable and function for a 10x14
- * font.
- *
- * \section Usage
- *
- * -# Declare a gFont global variable with the necessary Font information.
- * -# Implement an LCDD_DrawChar function which displays the specified
- * character on the LCD.
- * -# Use the LCDD_DrawString method defined in draw.h to display a complete
- * string.
- */
-
-#ifndef _LCD_FONT_
-#define _LCD_FONT_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include
-
-/*----------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-
-
-/** \brief Describes the font (width, height, supported characters, etc.) used by
- * the LCD driver draw API.
- */
-typedef struct _Font {
- /* Font width in pixels. */
- uint8_t width;
- /* Font height in pixels. */
- uint8_t height;
-} Font;
-
-/*----------------------------------------------------------------------------
- * Variables
- *----------------------------------------------------------------------------*/
-
-/** Global variable describing the font being instanced. */
-extern const Font gFont;
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern void LCDD_DrawChar(
- uint16_t* pCanvasBuffer,
- uint32_t x,
- uint32_t y,
- uint8_t c,
- uint32_t color );
-
-extern void LCD_DrawString(
- uint16_t* pCanvasBuffer,
- uint32_t dwX,
- uint32_t dwY,
- const uint8_t *pString,
- uint32_t color );
-
-
-#endif /* #ifndef LCD_FONT_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font10x14.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font10x14.h
deleted file mode 100644
index 050da6f2..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_font10x14.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
- /**
- * \file
- *
- * Font 10x14 table definition.
- *
- */
-
-#ifndef _LCD_FONT_10x14_
-#define _LCD_FONT_10x14_
-
-#include
-
-/** Char set of font 10x14 */
-extern const uint8_t pCharset10x14[];
-
-#endif /* #ifdef _LCD_FONT_10x14_ */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_gimp_image.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_gimp_image.h
deleted file mode 100644
index 2a975159..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcd_gimp_image.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _GIMP_IMAGE_
-#define _GIMP_IMAGE_
-
-#include
-
-typedef struct _SGIMPImage{
- uint32_t dwWidth;
- uint32_t dwHeight;
- uint32_t dwBytes_per_pixel; /* 3:RGB, 4:RGBA */
- uint8_t* pucPixel_data ;
-} SGIMPImage ;
-
-#endif // _GIMP_IMAGE_
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcdd.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcdd.h
deleted file mode 100644
index 59d5e6f0..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/lcdd.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for LCD driver.
- *
- */
-
-#ifndef LCDD_H
-#define LCDD_H
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern void LCDD_Initialize(uint8_t lcdMode, sXdmad * dmad, uint8_t cRotate);
-
-extern void LCDD_On(void);
-
-extern void LCDD_Off(void);
-
-extern void LCDD_SetBacklight (uint32_t step);
-
-#endif /* #ifndef LCDD_H */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/led.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/led.h
deleted file mode 100644
index 9f0b6abf..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/led.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Small set of functions for simple and portable LED usage.
- *
- * \section Usage
- *
- * -# Configure one or more LEDs using LED_Configure and
- * LED_ConfigureAll.
- * -# Set, clear and toggle LEDs using LED_Set, LED_Clear and
- * LED_Toggle.
- *
- * LEDs are numbered starting from 0; the number of LEDs depend on the
- * board being used. All the functions defined here will compile properly
- * regardless of whether the LED is defined or not; they will simply
- * return 0 when a LED which does not exist is given as an argument.
- * Also, these functions take into account how each LED is connected on to
- * board; thus, \ref LED_Set might change the level on the corresponding pin
- * to 0 or 1, but it will always light the LED on; same thing for the other
- * methods.
- */
-
-#ifndef _LED_
-#define _LED_
-
-#include
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern uint32_t LED_Configure( uint32_t dwLed );
-
-extern uint32_t LED_Set( uint32_t dwLed );
-
-extern uint32_t LED_Clear( uint32_t dwLed );
-
-extern uint32_t LED_Toggle( uint32_t dwLed );
-
-#endif /* #ifndef LED_H */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/math.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/math.h
deleted file mode 100644
index 0bb602d7..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/math.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _MATH_
-#define _MATH_
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *------------------------------------------------------------------------------*/
-
-extern uint32_t min( uint32_t dwA, uint32_t dwB );
-extern uint32_t absv( int32_t lValue );
-extern uint32_t power( uint32_t dwX, uint32_t dwY );
-
-#endif /* #ifndef _MATH_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/mcan_config.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/mcan_config.h
deleted file mode 100644
index a40cea49..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/mcan_config.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Interface for configuring and using Timer Counter (TC) peripherals.
- *
- * \section Usage
- * -# Optionally, use TC_FindMckDivisor() to let the program find the best
- * TCCLKS field value automatically.
- * -# Configure a Timer Counter in the desired mode using TC_Configure().
- * -# Start or stop the timer clock using TC_Start() and TC_Stop().
- */
-
-#ifndef _MCAN_CONFIG_
-#define _MCAN_CONFIG_
-
-/*------------------------------------------------------------------------------
- * Headers
- *------------------------------------------------------------------------------*/
-
-
-/*------------------------------------------------------------------------------
- * Global functions
- *------------------------------------------------------------------------------*/
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-/* Programmable Clock Source for Baud Rate is Common To Both MCAN Controllers */
-#define MCAN_PROG_CLK_PRESCALER 1 /* /1 to /256 */
-// select one of the following for the programmable clock source
-//#define MCAN_PROG_CLK_SELECT PMC_PCK_CSS_SLOW_CLK
-//#define MCAN_PROG_CLK_SELECT PMC_PCK_CSS_MAIN_CLK
-//#define MCAN_PROG_CLK_SELECT PMC_PCK_CSS_PLLA_CLK
-//#define MCAN_PROG_CLK_SELECT PMC_PCK_CSS_UPLL_CLK
-#define MCAN_PROG_CLK_SELECT PMC_PCK_CSS_MCK
-#define MCAN_PROG_CLK_FREQ_HZ \
- ( (float) 150000000 / (float) MCAN_PROG_CLK_PRESCALER )
-
-#define MCAN0_BIT_RATE_BPS 500000
-#define MCAN0_PROP_SEG 2
-#define MCAN0_PHASE_SEG1 11
-#define MCAN0_PHASE_SEG2 11
-#define MCAN0_SYNC_JUMP 4
-
-#define MCAN0_FAST_BIT_RATE_BPS 2000000
-#define MCAN0_FAST_PROP_SEG 2
-#define MCAN0_FAST_PHASE_SEG1 4
-#define MCAN0_FAST_PHASE_SEG2 4
-#define MCAN0_FAST_SYNC_JUMP 2
-
-#define MCAN0_NMBR_STD_FLTS 8 /* 128 max filters */
-#define MCAN0_NMBR_EXT_FLTS 8 /* 64 max filters */
-#define MCAN0_NMBR_RX_FIFO0_ELMTS 0 /* # of elements, 64 elements max */
-#define MCAN0_NMBR_RX_FIFO1_ELMTS 0 /* # of elements, 64 elements max */
-#define MCAN0_NMBR_RX_DED_BUF_ELMTS 16 /* # of elements, 64 elements max */
-#define MCAN0_NMBR_TX_EVT_FIFO_ELMTS 0 /* # of elements, 32 elements max */
-#define MCAN0_NMBR_TX_DED_BUF_ELMTS 4 /* # of elements, 32 elements max */
-#define MCAN0_NMBR_TX_FIFO_Q_ELMTS 0 /* # of elements, 32 elements max */
-#define MCAN0_RX_FIFO0_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN0_RX_FIFO1_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN0_RX_BUF_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN0_TX_BUF_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-
-#define MCAN1_BIT_RATE_BPS 500000
-#define MCAN1_PROP_SEG 2
-#define MCAN1_PHASE_SEG1 11
-#define MCAN1_PHASE_SEG2 11
-#define MCAN1_SYNC_JUMP 4
-
-#define MCAN1_FAST_BIT_RATE_BPS 2000000
-#define MCAN1_FAST_PROP_SEG 2
-#define MCAN1_FAST_PHASE_SEG1 4
-#define MCAN1_FAST_PHASE_SEG2 4
-#define MCAN1_FAST_SYNC_JUMP 2
-
-#define MCAN1_NMBR_STD_FLTS 8 /* 128 max filters */
-#define MCAN1_NMBR_EXT_FLTS 8 /* 64 max filters */
-#define MCAN1_NMBR_RX_FIFO0_ELMTS 12 /* # of elements, 64 elements max */
-#define MCAN1_NMBR_RX_FIFO1_ELMTS 0 /* # of elements, 64 elements max */
-#define MCAN1_NMBR_RX_DED_BUF_ELMTS 4 /* # of elements, 64 elements max */
-#define MCAN1_NMBR_TX_EVT_FIFO_ELMTS 0 /* # of elements, 32 elements max */
-#define MCAN1_NMBR_TX_DED_BUF_ELMTS 4 /* # of elements, 32 elements max */
-#define MCAN1_NMBR_TX_FIFO_Q_ELMTS 4 /* # of elements, 32 elements max */
-#define MCAN1_RX_FIFO0_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN1_RX_FIFO1_ELMT_SZ 8 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN1_RX_BUF_ELMT_SZ 64 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-#define MCAN1_TX_BUF_ELMT_SZ 32 /* 8, 12, 16, 20, 24, 32, 48, 64 bytes */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* #ifndef _MCAN_CONFIG_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/rtc_calib.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/rtc_calib.h
deleted file mode 100644
index 7d4e1253..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/rtc_calib.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for Real Time Clock calibration (RTC) .
- *
- */
-
-/** RTC crystal **/
-
-
-typedef struct{
- int8_t Tempr;
- int16_t PPM;
- uint8_t NEGPPM;
- uint8_t HIGHPPM;
- uint16_t CORRECTION;
- }RTC_PPMLookup;
-
-
-extern void RTC_ClockCalibration( Rtc* pRtc, int32_t CurrentTempr);
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/s25fl1.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/s25fl1.h
deleted file mode 100644
index 36b0590b..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/s25fl1.h
+++ /dev/null
@@ -1,255 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2013, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Interface for the S25fl1 Serial Flash driver.
- *
- */
-
-#ifndef S25FL1_H
-#define S25FL1_H
-#define USE_QSPI_DMA
-/*----------------------------------------------------------------------------
- * Macros
- *----------------------------------------------------------------------------*/
-
-#define Size(pAt25) ((pAt25)->pDesc->size)
-#define PageSize(pAt25) ((pAt25)->pDesc->pageSize)
-#define BlockSize(pAt25) ((pAt25)->pDesc->blockSize)
-#define Name(pAt25) ((pAt25)->pDesc->name)
-#define ManId(pAt25) (((pAt25)->pDesc->jedecId) & 0xFF)
-#define PageNumber(pAt25) (Size(pAt25) / PageSize(pAt25))
-#define BlockNumber(pAt25) (Size(pAt25) / BlockSize(pAt25))
-#define PagePerBlock(pAt25) (BlockSize(pAt25) / PageSize(pAt25))
-#define BlockEraseCmd(pAt25) ((pAt25)->pDesc->blockEraseCmd)
-
-/*----------------------------------------------------------------------------
- * Local definitions
- *----------------------------------------------------------------------------*/
-
-/** Device is protected, operation cannot be carried out. */
-#define ERROR_PROTECTED 1
-/** Device is busy executing a command. */
-#define ERROR_BUSY 2
-/** There was a problem while trying to program page data. */
-#define ERROR_PROGRAM 3
-/** There was an SPI communication error. */
-#define ERROR_SPI 4
-
-/** Device ready/busy status bit. */
-#define STATUS_RDYBSY (1 << 0)
-/** Device is ready. */
-#define STATUS_RDYBSY_READY (0 << 0)
-/** Device is busy with internal operations. */
-#define STATUS_RDYBSY_BUSY (1 << 0)
-/** Write enable latch status bit. */
-#define STATUS_WEL (1 << 1)
-/** Device is not write enabled. */
-#define STATUS_WEL_DISABLED (0 << 1)
-/** Device is write enabled. */
-#define STATUS_WEL_ENABLED (1 << 1)
-/** Software protection status bit-field. */
-#define STATUS_SWP (3 << 2)
-/** All sectors are software protected. */
-#define STATUS_SWP_PROTALL (3 << 2)
-/** Some sectors are software protected. */
-#define STATUS_SWP_PROTSOME (1 << 2)
-/** No sector is software protected. */
-#define STATUS_SWP_PROTNONE (0 << 2)
-/** Write protect pin status bit. */
-#define STATUS_WPP (1 << 4)
-/** Write protect signal is not asserted. */
-#define STATUS_WPP_NOTASSERTED (0 << 4)
-/** Write protect signal is asserted. */
-#define STATUS_WPP_ASSERTED (1 << 4)
-/** Erase/program error bit. */
-#define STATUS_EPE (1 << 5)
-/** Erase or program operation was successful. */
-#define STATUS_EPE_SUCCESS (0 << 5)
-/** Erase or program error detected. */
-#define STATUS_EPE_ERROR (1 << 5)
-/** Sector protection registers locked bit. */
-#define STATUS_SPRL (1 << 7)
-/** Sector protection registers are unlocked. */
-#define STATUS_SPRL_UNLOCKED (0 << 7)
-/** Sector protection registers are locked. */
-#define STATUS_SPRL_LOCKED (1 << 7)
-
-/** Quad enable bit */
-#define STATUS_QUAD_ENABLE (1 << 1)
- /** Quad enable bit */
-#define STATUS_WRAP_ENABLE (0 << 4)
-
- /** Latency control bits */
-#define STATUS_LATENCY_CTRL (0xF << 0)
-
-#define STATUS_WRAP_BYTE (1 << 5)
-
-#define BLOCK_PROTECT_Msk (7 << 2)
-
-#define TOP_BTM_PROTECT_Msk (1 << 5)
-
-#define SEC_PROTECT_Msk (1 << 6)
-
-#define CHIP_PROTECT_Msk (0x1F << 2)
-
-/** Read array command code. */
-#define READ_ARRAY 0x0B
-/** Read array (low frequency) command code. */
-#define READ_ARRAY_LF 0x03
-/** Fast Read array command code. */
-#define READ_ARRAY_DUAL 0x3B
-/** Fast Read array command code. */
-#define READ_ARRAY_QUAD 0x6B
-/** Fast Read array command code. */
-#define READ_ARRAY_DUAL_IO 0xBB
-/** Fast Read array command code. */
-#define READ_ARRAY_QUAD_IO 0xEB
-/** Block erase command code (4K block). */
-#define BLOCK_ERASE_4K 0x20
-/** Block erase command code (32K block). */
-#define BLOCK_ERASE_32K 0x52
-/** Block erase command code (64K block). */
-#define BLOCK_ERASE_64K 0xD8
-/** Chip erase command code 1. */
-#define CHIP_ERASE_1 0x60
-/** Chip erase command code 2. */
-#define CHIP_ERASE_2 0xC7
-/** Byte/page program command code. */
-#define BYTE_PAGE_PROGRAM 0x02
-/** Sequential program mode command code 1. */
-#define SEQUENTIAL_PROGRAM_1 0xAD
-/** Sequential program mode command code 2. */
-#define SEQUENTIAL_PROGRAM_2 0xAF
-/** Write enable command code. */
-#define WRITE_ENABLE 0x06
-/** Write disable command code. */
-#define WRITE_DISABLE 0x04
-/** Protect sector command code. */
-#define PROTECT_SECTOR 0x36
-/** Unprotected sector command code. */
-#define UNPROTECT_SECTOR 0x39
-/** Read sector protection registers command code. */
-#define READ_SECTOR_PROT 0x3C
-/** Read status register command code. */
-#define READ_STATUS_1 0x05
- /** Read status register command code. */
-#define READ_STATUS_2 0x35
- /** Read status register command code. */
-#define READ_STATUS_3 0x33
-/** Write status register command code. */
-#define WRITE_STATUS 0x01
-/** Read manufacturer and device ID command code. */
-#define READ_JEDEC_ID 0x9F
-/** Deep power-down command code. */
-#define DEEP_PDOWN 0xB9
-/** Resume from deep power-down command code. */
-#define RES_DEEP_PDOWN 0xAB
-/** Resume from deep power-down command code. */
-#define SOFT_RESET_ENABLE 0x66
-/** Resume from deep power-down command code. */
-#define SOFT_RESET 0x99
-/** Resume from deep power-down command code. */
-#define WRAP_ENABLE 0x77
-/** Continuous Read Mode Reset command code. */
-#define CONT_MODE_RESET 0xFF
-
-/** SPI Flash Manufacturer JEDEC ID */
-#define ATMEL_SPI_FLASH 0x1F
-#define ST_SPI_FLASH 0x20
-#define WINBOND_SPI_FLASH 0xEF
-#define MACRONIX_SPI_FLASH 0xC2
-#define SST_SPI_FLASH 0xBF
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-uint32_t S25FL1D_ReadJedecId(void);
-
-void S25FL1D_InitFlashInterface(uint8_t Mode);
-
-void S25FL1D_SoftReset(void);
-
-void S25FL1D_ContReadModeReset(void);
-unsigned char S25FL1D_Unprotect(void);
-
-unsigned char S25FL1D_Protect(uint32_t StartAddr, uint32_t Size);
-
-void S25FL1D_QuadMode(uint8_t Enable);
-
-void S25FL1D_EnableWrap(uint8_t ByetAlign);
-
-void S25FL1D_SetReadLatencyControl(uint8_t Latency);
-
-unsigned char S25FL1D_EraseChip(void);
-
-unsigned char S25FL1D_EraseSector( unsigned int address);
-
-unsigned char S25FL1D_Erase64KBlock( unsigned int address);
-
-unsigned char S25FL1D_Write(
- uint32_t *pData,
- uint32_t size,
- uint32_t address,
- uint8_t Secure);
-
-extern unsigned char S25FL1D_Read(
- uint32_t *pData,
- uint32_t size,
- uint32_t address);
-
-extern unsigned char S25FL1D_ReadDual(
- uint32_t *pData,
- uint32_t size,
- uint32_t address);
-
-extern unsigned char S25FL1D_ReadQuad(
- uint32_t *pData,
- uint32_t size,
- uint32_t address);
-
-extern unsigned char S25FL1D_ReadDualIO(
- uint32_t *pData,
- uint32_t size,
- uint32_t address,
- uint8_t ContMode,
- uint8_t Secure);
-
-extern unsigned char S25FL1D_ReadQuadIO(
- uint32_t *pData,
- uint32_t size,
- uint32_t address,
- uint8_t ContMode,
- uint8_t Secure);
-
-#endif // #ifndef S25FL1_H
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/syscalls.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/syscalls.h
deleted file mode 100644
index c6c1deaf..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/syscalls.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2013, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file syscalls.h
- *
- * Implementation of newlib syscall.
- *
- */
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-
-#include
-#include
-#include
-#include
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern caddr_t _sbrk ( int incr );
-
-extern int link( char *old, char *new );
-
-extern int _close( int file );
-
-extern int _fstat( int file, struct stat *st );
-
-extern int _isatty( int file );
-
-extern int _lseek( int file, int ptr, int dir );
-
-extern int _read(int file, char *ptr, int len);
-
-extern int _write( int file, char *ptr, int len );
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/wm8904.h b/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/wm8904.h
deleted file mode 100644
index 5f40d572..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libboard_samv7-ek/include/wm8904.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2012, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * Implementation WM8904 driver.
- *
- */
-
-#ifndef WM8904_H
-#define WM8904_H
-
-#include "board.h"
-
-/*----------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-#define WM8904_CSB_STATE (0x0 << 0)
-
-/** Slave address */
-#define WM8904_SLAVE_ADDRESS 0x1a | WM8904_CSB_STATE
-#define CS2100_SLAVE_ADDRESS 0x4E
-
-
-/** Reset register*/
-#define WM8904_REG_RESET 0x00
-
-/** Bias control 0 register*/
-#define WM8904_REG_BIAS_CTRL0 0x04
-
-/** VMID control 0 register*/
-#define WM8904_REG_VMID_CTRL0 0x05
-
-/** MIC Bias control 0 register*/
-#define WM8904_REG_MICBIAS_CTRL0 0x06
-
-/** Bias control 1 register*/
-#define WM8904_REG_BIAS_CTRL1 0x07
-
-/** Power management control 0 register*/
-#define WM8904_REG_POWER_MANG0 0x0C
-/** Power management control 2 register*/
-#define WM8904_REG_POWER_MANG2 0x0E
-/** Power management control 3 register*/
-#define WM8904_REG_POWER_MANG3 0x0F
-/** Power management control 6 register*/
-#define WM8904_REG_POWER_MANG6 0x12
-
-/** Clock rate0 register*/
-#define WM8904_REG_CLOCK_RATE0 0x14
-/** Clock rate1 register*/
-#define WM8904_REG_CLOCK_RATE1 0x15
-
-/** Clock rate2 register*/
-#define WM8904_REG_CLOCK_RATE2 0x16
-
-/** Audio interface0 register*/
-#define WM8904_REG_AUD_INF0 0x18
-
-/** Audio interface1 register*/
-#define WM8904_REG_AUD_INF1 0x19
-/** Audio interface2 register*/
-#define WM8904_REG_AUD_INF2 0x1A
-/** Audio interface3 register*/
-#define WM8904_REG_AUD_INF3 0x1B
-
-/** ADC digital 0 register*/
-#define WM8904_REG_ADC_DIG0 0x20
-/** ADC digital 1 register*/
-#define WM8904_REG_ADC_DIG1 0x21
-
-/** Analogue left input 0 register*/
-#define WM8904_REG_ANALOGUE_LIN0 0x2C
-/** Analogue right input 0 register*/
-#define WM8904_REG_ANALOGUE_RIN0 0x2D
-
-/** Analogue left input 1 register*/
-#define WM8904_REG_ANALOGUE_LIN1 0x2E
-/** Analogue right input 1 register*/
-#define WM8904_REG_ANALOGUE_RIN1 0x2F
-
-/** Analogue left output 1 register*/
-#define WM8904_REG_ANALOGUE_LOUT1 0x39
-/** Analogue right output 1 register*/
-#define WM8904_REG_ANALOGUE_ROUT1 0x3A
-
-/** Analogue left output 2 register*/
-#define WM8904_REG_ANALOGUE_LOUT2 0x3B
-/** Analogue right output 2 register*/
-#define WM8904_REG_ANALOGUE_ROUT2 0x3C
-
-/** Analogue output 12 ZC register*/
-#define WM8904_REG_ANALOGUE_OUT12ZC 0x3D
-
-/** DC servo 0 register*/
-#define WM8904_REG_DC_SERVO0 0x43
-
-/** Analogue HP 0 register*/
-#define WM8904_REG_ANALOGUE_HP0 0x5A
-
-/** Charge pump 0 register*/
-#define WM8904_REG_CHARGE_PUMP0 0x62
-
-/** Class W 0 register*/
-#define WM8904_REG_CLASS0 0x68
-
-/** FLL control 1 register*/
-#define WM8904_REG_FLL_CRTL1 0x74
-/** FLL control 2 register*/
-#define WM8904_REG_FLL_CRTL2 0x75
-/** FLL control 3 register*/
-#define WM8904_REG_FLL_CRTL3 0x76
-/** FLL control 4 register*/
-#define WM8904_REG_FLL_CRTL4 0x77
-/** FLL control 5 register*/
-#define WM8904_REG_FLL_CRTL5 0x78
-
-/** DUMMY register*/
-#define WM8904_REG_END 0xFF
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-
-extern uint16_t WM8904_Read(Twid *pTwid, uint32_t device, uint32_t regAddr);
-extern void WM8904_Write(Twid *pTwid, uint32_t device, uint32_t regAddr,
- uint16_t data);
-extern uint8_t WM8904_Init(Twid *pTwid, uint32_t device, uint32_t PCK);
-extern uint8_t WM8904_VolumeSet(Twid *pTwid, uint32_t device, uint16_t value);
-extern void WM8904_IN2R_IN1L(Twid *pTwid, uint32_t device);
-#endif // WM8904_H
-
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/chip.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/chip.h
deleted file mode 100644
index 0aef4dc6..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/chip.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef SAMS7_CHIP_H
-#define SAMS7_CHIP_H
-
-#include "compiler.h"
-
-
-/*************************************************
- * Memory type and its attribute
- *************************************************/
-#define SHAREABLE 1
-#define NON_SHAREABLE 0
- /*********************************************************************************************************************************************************************
- * Memory Type Definition Memory TEX attribute C attribute B attribute S attribute
- **********************************************************************************************************************************************************************/
-
-#define STRONGLY_ORDERED_SHAREABLE_TYPE (( 0x00 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos )) // DO not care //
-#define SHAREABLE_DEVICE_TYPE (( 0x00 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( ENABLE << MPU_RASR_B_Pos )) // DO not care //
-#define INNER_OUTER_NORMAL_WT_NWA_TYPE(x) (( 0x00 << MPU_RASR_TEX_Pos ) | ( ENABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_OUTER_NORMAL_WB_NWA_TYPE(x) (( 0x00 << MPU_RASR_TEX_Pos ) | ( ENABLE << MPU_RASR_C_Pos ) | ( ENABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_OUTER_NORMAL_NOCACHE_TYPE(x) (( 0x01 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_OUTER_NORMAL_WB_RWA_TYPE(x) (( 0x01 << MPU_RASR_TEX_Pos ) | ( ENABLE << MPU_RASR_C_Pos ) | ( ENABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define NON_SHAREABLE_DEVICE_TYPE (( 0x02 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos )) // DO not care //
-
- /* Normal memory attributes with outer capability rules to Non_Cacable */
-
-#define INNER_NORMAL_NOCACHE_TYPE(x) (( 0x04 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_NORMAL_WB_RWA_TYPE(x) (( 0x04 << MPU_RASR_TEX_Pos ) | ( DISABLE << MPU_RASR_C_Pos ) | ( ENABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_NORMAL_WT_NWA_TYPE(x) (( 0x04 << MPU_RASR_TEX_Pos ) | ( ENABLE << MPU_RASR_C_Pos ) | ( DISABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-#define INNER_NORMAL_WB_NWA_TYPE(x) (( 0x04 << MPU_RASR_TEX_Pos ) | ( ENABLE << MPU_RASR_C_Pos ) | ( ENABLE << MPU_RASR_B_Pos ) | ( x << MPU_RASR_S_Pos ))
-
-/* SCB Interrupt Control State Register Definitions */
-#ifndef SCB_VTOR_TBLBASE_Pos
-#define SCB_VTOR_TBLBASE_Pos 29 /*!< SCB VTOR: TBLBASE Position */
-#define SCB_VTOR_TBLBASE_Msk (1UL << SCB_VTOR_TBLBASE_Pos) /*!< SCB VTOR: TBLBASE Mask */
-#endif
-
-
-/*
- * Peripherals
- */
-#include "include/acc.h"
-#include "include/aes.h"
-#include "include/afec.h"
-#include "include/efc.h"
-#include "include/pio.h"
-#include "include/pio_it.h"
-#include "include/efc.h"
-#include "include/rstc.h"
-#include "include/mpu.h"
-#include "include/gmac.h"
-#include "include/gmacd.h"
-#include "include/video.h"
-#include "include/icm.h"
-#include "include/isi.h"
-#include "include/exceptions.h"
-#include "include/pio_capture.h"
-#include "include/rtc.h"
-#include "include/rtt.h"
-#include "include/tc.h"
-#include "include/timetick.h"
-#include "include/twi.h"
-#include "include/flashd.h"
-#include "include/pmc.h"
-#include "include/pwmc.h"
-#include "include/mcan.h"
-#include "include/supc.h"
-#include "include/usart.h"
-#include "include/uart.h"
-#include "include/isi.h"
-#include "include/hsmci.h"
-#include "include/ssc.h"
-#include "include/twi.h"
-#include "include/trng.h"
-#include "include/wdt.h"
-#include "include/spi.h"
-#include "include/qspi.h"
-#include "include/trace.h"
-#include "include/xdmac.h"
-#include "include/xdma_hardware_interface.h"
-#include "include/xdmad.h"
-#include "include/mcid.h"
-#include "include/twid.h"
-#include "include/spi_dma.h"
-#include "include/qspi_dma.h"
-#include "include/uart_dma.h"
-#include "include/usart_dma.h"
-#include "include/twid.h"
-#include "include/afe_dma.h"
-#include "include/dac_dma.h"
-#include "include/usbhs.h"
-
-#define ENABLE_PERIPHERAL(dwId) PMC_EnablePeripheral( dwId )
-#define DISABLE_PERIPHERAL(dwId) PMC_DisablePeripheral( dwId )
-
-#endif /* SAMS7_CHIP_H */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/compiler.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/compiler.h
deleted file mode 100644
index 53c0d625..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/compiler.h
+++ /dev/null
@@ -1,442 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
-
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _COMPILER_H_
-#define _COMPILER_H_
-
-/*
- * Peripherals registers definitions
- */
-#include "include/samv7/samv71.h"
-
-
-//_____ D E C L A R A T I O N S ____________________________________________
-
-#ifndef __ASSEMBLY__
-
-#include
-#include
-#include
-#include
-
-/* Define WEAK attribute */
-#if defined ( __CC_ARM )
- #define WEAK __attribute__ ((weak))
-#elif defined ( __ICCARM__ )
- #define WEAK __weak
-#elif defined ( __GNUC__ )
- #define WEAK __attribute__ ((weak))
-#endif
-
-/* Define Compiler name of tool chains */
-#if defined ( __CC_ARM )
- #define COMPILER_NAME "KEIL"
-#elif defined ( __ICCARM__ )
- #define COMPILER_NAME "IAR"
-#elif defined ( __GNUC__ )
- #define COMPILER_NAME "GCC"
-#endif
-
-/* Define NO_INIT attribute */
-#if defined ( __CC_ARM )
- #define NO_INIT
-#elif defined ( __ICCARM__ )
- #define NO_INIT __no_init
-#elif defined ( __GNUC__ )
- #define NO_INIT
-#endif
-
-
-/* Define memory sync for tool chains */
-#if defined ( __CC_ARM )
- #define memory_sync() __dsb(15);__isb(15);
-#elif defined ( __ICCARM__ )
- #define memory_sync() __DSB();__ISB();
-#elif defined ( __GNUC__ )
- #define memory_sync() __DSB();__ISB();
-#endif
-
-/* Define memory barrier for tool chains */
-#if defined ( __CC_ARM )
- #define memory_barrier() __dmb(15);
-#elif defined ( __ICCARM__ )
- #define memory_barrier() __DMB();
-#elif defined ( __GNUC__ )
- #define memory_barrier() __DMB();
-#endif
-
-/*! \name Token Paste
- *
- * Paste N preprocessing tokens together, these tokens being allowed to be \#defined.
- *
- * May be used only within macros with the tokens passed as arguments if the tokens are \#defined.
- *
- * For example, writing TPASTE2(U, WIDTH) within a macro \#defined by
- * UTYPE(WIDTH) and invoked as UTYPE(UL_WIDTH) with UL_WIDTH \#defined as 32 is
- * equivalent to writing U32.
- */
-//! @{
-#define TPASTE2( a, b) a##b
-#define TPASTE3( a, b, c) a##b##c
-//! @}
-
-/*! \name Absolute Token Paste
- *
- * Paste N preprocessing tokens together, these tokens being allowed to be \#defined.
- *
- * No restriction of use if the tokens are \#defined.
- *
- * For example, writing ATPASTE2(U, UL_WIDTH) anywhere with UL_WIDTH \#defined
- * as 32 is equivalent to writing U32.
- */
-//! @{
-#define ATPASTE2( a, b) TPASTE2( a, b)
-#define ATPASTE3( a, b, c) TPASTE3( a, b, c)
-//! @}
-
-
-/**
- * \brief Emit the compiler pragma \a arg.
- *
- * \param arg The pragma directive as it would appear after \e \#pragma
- * (i.e. not stringified).
- */
-#define COMPILER_PRAGMA(arg) _Pragma(#arg)
-
-/**
- * \def COMPILER_PACK_SET(alignment)
- * \brief Set maximum alignment for subsequent structure and union
- * definitions to \a alignment.
- */
-#define COMPILER_PACK_SET(alignment) COMPILER_PRAGMA(pack(alignment))
-
-/**
- * \def COMPILER_PACK_RESET()
- * \brief Set default alignment for subsequent structure and union
- * definitions.
- */
-#define COMPILER_PACK_RESET() COMPILER_PRAGMA(pack())
-
-/**
- * \brief Set user-defined section.
- * Place a data object or a function in a user-defined section.
- */
-#if defined ( __CC_ARM )
- #define COMPILER_SECTION(a) __attribute__((__section__(a)))
-#elif defined ( __ICCARM__ )
- #define COMPILER_SECTION(a) COMPILER_PRAGMA(location = a)
-#elif defined ( __GNUC__ )
- #define COMPILER_SECTION(a) __attribute__((__section__(a)))
-#endif
-
-/**
- * \brief Set aligned boundary.
- */
-#if defined ( __CC_ARM )
- #define COMPILER_ALIGNED(a) __attribute__((__aligned__(a)))
-#elif defined ( __ICCARM__ )
- #define COMPILER_ALIGNED(a) COMPILER_PRAGMA(data_alignment = a)
-#elif defined ( __GNUC__ )
- #define COMPILER_ALIGNED(a) __attribute__((__aligned__(a)))
-#endif
-
-/**
- * \brief Set word-aligned boundary.
- */
-
-#if defined ( __CC_ARM )
- #define COMPILER_WORD_ALIGNED __attribute__((__aligned__(4)))
-#elif defined ( __ICCARM__ )
- #define COMPILER_WORD_ALIGNED COMPILER_PRAGMA(data_alignment = 4)
-#elif defined ( __GNUC__ )
- #define COMPILER_WORD_ALIGNED __attribute__((__aligned__(4)))
-#endif
-
-
-
-/*! \name Mathematics
- *
- * The same considerations as for clz and ctz apply here but GCC does not
- * provide built-in functions to access the assembly instructions abs, min and
- * max and it does not produce them by itself in most cases, so two sets of
- * macros are defined here:
- * - Abs, Min and Max to apply to constant expressions (values known at
- * compile time);
- * - abs, min and max to apply to non-constant expressions (values unknown at
- * compile time), abs is found in stdlib.h.
- */
-//! @{
-
-/*! \brief Takes the absolute value of \a a.
- *
- * \param a Input value.
- *
- * \return Absolute value of \a a.
- *
- * \note More optimized if only used with values known at compile time.
- */
-#define Abs(a) (((a) < 0 ) ? -(a) : (a))
-
-/*! \brief Takes the minimal value of \a a and \a b.
- *
- * \param a Input value.
- * \param b Input value.
- *
- * \return Minimal value of \a a and \a b.
- *
- * \note More optimized if only used with values known at compile time.
- */
-#define Min(a, b) (((a) < (b)) ? (a) : (b))
-
-/*! \brief Takes the maximal value of \a a and \a b.
- *
- * \param a Input value.
- * \param b Input value.
- *
- * \return Maximal value of \a a and \a b.
- *
- * \note More optimized if only used with values known at compile time.
- */
-#define Max(a, b) (((a) > (b)) ? (a) : (b))
-
-// abs() is already defined by stdlib.h
-
-/*! \brief Takes the minimal value of \a a and \a b.
- *
- * \param a Input value.
- * \param b Input value.
- *
- * \return Minimal value of \a a and \a b.
- *
- * \note More optimized if only used with values unknown at compile time.
- */
-#define min(a, b) Min(a, b)
-
-/*! \brief Takes the maximal value of \a a and \a b.
- *
- * \param a Input value.
- * \param b Input value.
- *
- * \return Maximal value of \a a and \a b.
- *
- * \note More optimized if only used with values unknown at compile time.
- */
-#define max(a, b) Max(a, b)
-
-//! @}
-
-#define be32_to_cpu(x) __REV(x)
-#define cpu_to_be32(x) __REV(x)
-#define BE32_TO_CPU(x) __REV(x)
-#define CPU_TO_BE32(x) __REV(x)
-
-/**
- * \def UNUSED
- * \brief Marking \a v as a unused parameter or value.
- */
-#define UNUSED(v) (void)(v)
-
-/**
- * \weakgroup interrupt_group
- *
- * @{
- */
-
-/**
- * \name Interrupt Service Routine definition
- *
- * @{
- */
-
-/**
- * \brief Initialize interrupt vectors
- *
- * For NVIC the interrupt vectors are put in vector table. So nothing
- * to do to initialize them, except defined the vector function with
- * right name.
- *
- * This must be called prior to \ref irq_register_handler.
- */
-# define irq_initialize_vectors() \
- do { \
- } while(0)
-
-/**
- * \brief Register handler for interrupt
- *
- * For NVIC the interrupt vectors are put in vector table. So nothing
- * to do to register them, except defined the vector function with
- * right name.
- *
- * Usage:
- * \code
- irq_initialize_vectors();
- irq_register_handler(foo_irq_handler);
-\endcode
- *
- * \note The function \a func must be defined with the \ref ISR macro.
- * \note The functions prototypes can be found in the device exception header
- * files (exceptions.h).
- */
-# define irq_register_handler(int_num, int_prio) \
- NVIC_ClearPendingIRQ( (IRQn_Type)int_num); \
- NVIC_SetPriority( (IRQn_Type)int_num, int_prio); \
- NVIC_EnableIRQ( (IRQn_Type)int_num); \
-
-//@}
-
-
-# define cpu_irq_enable() \
- do { \
- /*g_interrupt_enabled = true; */ \
- __DMB(); \
- __enable_irq(); \
- } while (0)
-# define cpu_irq_disable() \
- do { \
- __disable_irq(); \
- __DMB(); \
- /*g_interrupt_enabled = false; */ \
- } while (0)
-
-typedef uint32_t irqflags_t;
-
-#if !defined(__DOXYGEN__)
-extern volatile bool g_interrupt_enabled;
-#endif
-
-#define cpu_irq_is_enabled() (__get_PRIMASK() == 0)
-
-static volatile uint32_t cpu_irq_critical_section_counter;
-static volatile bool cpu_irq_prev_interrupt_state;
-
-static inline irqflags_t cpu_irq_save(void)
-{
- irqflags_t flags = cpu_irq_is_enabled();
- cpu_irq_disable();
- return flags;
-}
-
-static inline bool cpu_irq_is_enabled_flags(irqflags_t flags)
-{
- return (flags);
-}
-
-static inline void cpu_irq_restore(irqflags_t flags)
-{
- if (cpu_irq_is_enabled_flags(flags))
- cpu_irq_enable();
-}
-/*
-void cpu_irq_enter_critical(void);
-void cpu_irq_leave_critical(void);*/
-
-/**
- * \weakgroup interrupt_deprecated_group
- * @{
- */
-
-#define Enable_global_interrupt() cpu_irq_enable()
-#define Disable_global_interrupt() cpu_irq_disable()
-#define Is_global_interrupt_enabled() cpu_irq_is_enabled()
-
-
-//_____ M A C R O S ________________________________________________________
-
-/*! \name Usual Constants
- */
-//! @{
-#define DISABLE 0
-#define ENABLE 1
-#define DISABLED 0
-#define ENABLED 1
-#define OFF 0
-#define ON 1
-#define FALSE 0
-#define TRUE 1
-#ifndef __cplusplus
-#if !defined(__bool_true_false_are_defined)
-#define false FALSE
-#define true TRUE
-#endif
-#endif
-#define KO 0
-#define OK 1
-#define PASS 0
-#define FAIL 1
-#define LOW 0
-#define HIGH 1
-#define CLR 0
-#define SET 1
-//! @}
-
-/*! \brief Counts the trailing zero bits of the given value considered as a 32-bit integer.
- *
- * \param u Value of which to count the trailing zero bits.
- *
- * \return The count of trailing zero bits in \a u.
- */
-#define ctz(u) ((u) & (1ul << 0) ? 0 : \
- (u) & (1ul << 1) ? 1 : \
- (u) & (1ul << 2) ? 2 : \
- (u) & (1ul << 3) ? 3 : \
- (u) & (1ul << 4) ? 4 : \
- (u) & (1ul << 5) ? 5 : \
- (u) & (1ul << 6) ? 6 : \
- (u) & (1ul << 7) ? 7 : \
- (u) & (1ul << 8) ? 8 : \
- (u) & (1ul << 9) ? 9 : \
- (u) & (1ul << 10) ? 10 : \
- (u) & (1ul << 11) ? 11 : \
- (u) & (1ul << 12) ? 12 : \
- (u) & (1ul << 13) ? 13 : \
- (u) & (1ul << 14) ? 14 : \
- (u) & (1ul << 15) ? 15 : \
- (u) & (1ul << 16) ? 16 : \
- (u) & (1ul << 17) ? 17 : \
- (u) & (1ul << 18) ? 18 : \
- (u) & (1ul << 19) ? 19 : \
- (u) & (1ul << 20) ? 20 : \
- (u) & (1ul << 21) ? 21 : \
- (u) & (1ul << 22) ? 22 : \
- (u) & (1ul << 23) ? 23 : \
- (u) & (1ul << 24) ? 24 : \
- (u) & (1ul << 25) ? 25 : \
- (u) & (1ul << 26) ? 26 : \
- (u) & (1ul << 27) ? 27 : \
- (u) & (1ul << 28) ? 28 : \
- (u) & (1ul << 29) ? 29 : \
- (u) & (1ul << 30) ? 30 : \
- (u) & (1ul << 31) ? 31 : \
- 32)
-
-#endif // __ASSEMBLY__
-
-#endif // _COMPILER_H_
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/acc.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/acc.h
deleted file mode 100644
index 73c945a7..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/acc.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Interface for configuration the Analog-to-Digital Converter (ACC) peripheral.
- *
- * \section Usage
- *
- * -# Configurate the pins for ACC
- * -# Initialize the ACC with ACC_Initialize().
- * -# Select the active channel using ACC_EnableChannel()
- * -# Start the conversion with ACC_StartConversion()
- * -# Wait the end of the conversion by polling status with ACC_GetStatus()
- * -# Finally, get the converted data using ACC_GetConvertedData()
- *
- */
-#ifndef _ACC_
-#define _ACC_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-#include "chip.h"
-
-#include
-#include
-
-/*------------------------------------------------------------------------------
- * Definitions
- *------------------------------------------------------------------------------*/
-#define ACC_SELPLUS_AD12B0 0
-#define ACC_SELPLUS_AD12B1 1
-#define ACC_SELPLUS_AD12B2 2
-#define ACC_SELPLUS_AD12B3 3
-#define ACC_SELPLUS_AD12B4 4
-#define ACC_SELPLUS_AD12B5 5
-#define ACC_SELPLUS_AD12B6 6
-#define ACC_SELPLUS_AD12B7 7
-#define ACC_SELMINUS_TS 0
-#define ACC_SELMINUS_ADVREF 1
-#define ACC_SELMINUS_DAC0 2
-#define ACC_SELMINUS_DAC1 3
-#define ACC_SELMINUS_AD12B0 4
-#define ACC_SELMINUS_AD12B1 5
-#define ACC_SELMINUS_AD12B2 6
-#define ACC_SELMINUS_AD12B3 7
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-/*------------------------------------------------------------------------------
- * Macros function of register access
- *------------------------------------------------------------------------------*/
-#define ACC_CfgModeReg(pAcc, mode) { \
- (pAcc)->ACC_MR = (mode);\
- }
-
-#define ACC_GetModeReg( pAcc ) ((pAcc)->ACC_MR)
-
-#define ACC_StartConversion( pAcc ) ((pAcc)->ACC_CR = ACC_CR_START)
-
-#define ACC_SoftReset( pAcc ) ((pAcc)->ACC_CR = ACC_CR_SWRST)
-
-#define ACC_EnableChannel( pAcc, dwChannel ) {\
- assert( dwChannel < 16 ) ;\
- (pAcc)->ACC_CHER = (1 << (dwChannel));\
- }
-
-#define ACC_DisableChannel( pAcc, dwChannel ) {\
- assert( dwChannel < 16 ) ;\
- (pAcc)->ACC_CHDR = (1 << (dwChannel));\
- }
-
-#define ACC_EnableIt( pAcc, dwMode ) {\
- assert( ((dwMode)&0xFFF00000)== 0 ) ;\
- (pAcc)->ACC_IER = (dwMode);\
- }
-
-#define ACC_DisableIt( pAcc, dwMode ) {\
- assert( ((dwMode)&0xFFF00000)== 0 ) ;\
- (pAcc)->ACC_IDR = (dwMode);\
- }
-
-#define ACC_EnableDataReadyIt( pAcc ) ((pAcc)->ACC_IER = AT91C_ACC_DRDY)
-
-#define ACC_GetStatus( pAcc ) ((pAcc)->ACC_ISR)
-
-#define ACC_GetChannelStatus( pAcc ) ((pAcc)->ACC_CHSR)
-
-#define ACC_GetInterruptMaskStatus( pAcc ) ((pAcc)->ACC_IMR)
-
-#define ACC_GetLastConvertedData( pAcc ) ((pAcc)->ACC_LCDR)
-
-#define ACC_CfgAnalogCtrlReg( pAcc, dwMode ) {\
- assert( ((dwMode) & 0xFFFCFF3C) == 0 ) ;\
- (pAcc)->ACC_ACR = (dwMode);\
- }
-
-#define ACC_CfgExtModeReg( pAcc, extmode ) {\
- assert( ((extmode) & 0xFF00FFFE) == 0 ) ;\
- (pAcc)->ACC_EMR = (extmode);\
- }
-
-#define ACC_GetAnalogCtrlReg( pAcc ) ((pAcc)->ACC_ACR)
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *------------------------------------------------------------------------------*/
-extern void ACC_Configure( Acc *pAcc, uint8_t idAcc, uint8_t ucSelplus,
- uint8_t ucSelminus, uint16_t wAc_en, uint16_t wEdge, uint16_t wInvert ) ;
-
-extern void ACC_SetComparisonPair( Acc *pAcc, uint8_t ucSelplus, uint8_t ucSelminus ) ;
-
-extern uint32_t ACC_GetComparisonResult( Acc* pAcc, uint32_t dwStatus ) ;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* #ifndef _ACC_ */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/adc.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/adc.h
deleted file mode 100644
index d919742a..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/adc.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2011, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Interface for configuration the Analog-to-Digital Converter (ADC) peripheral.
- *
- * \section Usage
- *
- * -# Configurate the pins for ADC.
- * -# Initialize the ADC with ADC_Initialize().
- * -# Set ADC clock and timing with ADC_SetClock() and ADC_SetTiming().
- * -# Select the active channel using ADC_EnableChannel().
- * -# Start the conversion with ADC_StartConversion().
- * -# Wait the end of the conversion by polling status with ADC_GetStatus().
- * -# Finally, get the converted data using ADC_GetConvertedData() or
- * ADC_GetLastConvertedData().
- *
-*/
-#ifndef _ADC_
-#define _ADC_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-#include
-#include
-
-/*------------------------------------------------------------------------------
- * Definitions
- *------------------------------------------------------------------------------*/
-
-/* Max. ADC Clock Frequency (Hz) */
-#define ADC_CLOCK_MAX 20000000
-
-/* Max. normal ADC startup time (us) */
-#define ADC_STARTUP_NORMAL_MAX 40
-/* Max. fast ADC startup time (us) */
-#define ADC_STARTUP_FAST_MAX 12
-
-/* Definitions for ADC channels */
-#define ADC_CHANNEL_0 0
-#define ADC_CHANNEL_1 1
-#define ADC_CHANNEL_2 2
-#define ADC_CHANNEL_3 3
-#define ADC_CHANNEL_4 4
-#define ADC_CHANNEL_5 5
-#define ADC_CHANNEL_6 6
-#define ADC_CHANNEL_7 7
-#define ADC_CHANNEL_8 8
-#define ADC_CHANNEL_9 9
-#define ADC_CHANNEL_10 10
-#define ADC_CHANNEL_11 11
-#define ADC_CHANNEL_12 12
-#define ADC_CHANNEL_13 13
-#define ADC_CHANNEL_14 14
-#define ADC_CHANNEL_15 15
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-/*------------------------------------------------------------------------------
- * Macros function of register access
- *------------------------------------------------------------------------------*/
-
-#define ADC_GetModeReg( pAdc ) ((pAdc)->ADC_MR)
-
-#define ADC_StartConversion( pAdc ) ((pAdc)->ADC_CR = ADC_CR_START)
-
-#define ADC_SetCalibMode(pAdc) ((pAdc)->ADC_CR |= ADC_CR_AUTOCAL)
-
-#define ADC_EnableChannel( pAdc, dwChannel ) {\
- (pAdc)->ADC_CHER = (1 << (dwChannel));\
- }
-
-#define ADC_DisableChannel(pAdc, dwChannel) {\
- (pAdc)->ADC_CHDR = (1 << (dwChannel));\
- }
-
-#define ADC_EnableIt(pAdc, dwMode) {\
- (pAdc)->ADC_IER = (dwMode);\
- }
-
-#define ADC_DisableIt(pAdc, dwMode) {\
- (pAdc)->ADC_IDR = (dwMode);\
- }
-
-#define ADC_SetChannelGain(pAdc,dwMode) {\
- (pAdc)->ADC_CGR = dwMode;\
- }
-
-#define ADC_SetChannelOffset(pAdc,dwMode) {\
- (pAdc)->ADC_COR = dwMode;\
- }
-
-#define ADC_EnableDataReadyIt(pAdc) ((pAdc)->ADC_IER = ADC_IER_DRDY)
-
-#define ADC_GetStatus(pAdc) ((pAdc)->ADC_ISR)
-
-#define ADC_GetCompareMode(pAdc) (((pAdc)->ADC_EMR)& (ADC_EMR_CMPMODE_Msk))
-
-#define ADC_GetChannelStatus(pAdc) ((pAdc)->ADC_CHSR)
-
-#define ADC_GetInterruptMaskStatus(pAdc) ((pAdc)->ADC_IMR)
-
-#define ADC_GetLastConvertedData(pAdc) ((pAdc)->ADC_LCDR)
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *------------------------------------------------------------------------------*/
-extern void ADC_Initialize( Adc* pAdc, uint32_t dwId );
-extern uint32_t ADC_SetClock( Adc* pAdc, uint32_t dwPres, uint32_t dwMck );
-extern void ADC_SetTiming( Adc* pAdc, uint32_t dwStartup, uint32_t dwTracking,
- uint32_t dwSettling );
-extern void ADC_SetTrigger( Adc* pAdc, uint32_t dwTrgSel );
-extern void ADC_SetTriggerMode(Adc *pAdc, uint32_t dwMode);
-extern void ADC_SetLowResolution( Adc* pAdc, uint32_t bEnDis );
-extern void ADC_SetSleepMode( Adc *pAdc, uint8_t bEnDis );
-extern void ADC_SetFastWakeup( Adc *pAdc, uint8_t bEnDis );
-extern void ADC_SetSequenceMode( Adc *pAdc, uint8_t bEnDis );
-extern void ADC_SetSequence( Adc *pAdc, uint32_t dwSEQ1, uint32_t dwSEQ2 );
-extern void ADC_SetSequenceByList( Adc *pAdc, uint8_t ucChList[], uint8_t ucNumCh );
-extern void ADC_SetAnalogChange( Adc *pAdc, uint8_t bEnDis );
-extern void ADC_SetTagEnable( Adc *pAdc, uint8_t bEnDis );
-extern void ADC_SetCompareChannel( Adc* pAdc, uint32_t dwChannel ) ;
-extern void ADC_SetCompareMode( Adc* pAdc, uint32_t dwMode ) ;
-extern void ADC_SetComparisonWindow( Adc* pAdc, uint32_t dwHi_Lo ) ;
-extern uint8_t ADC_CheckConfiguration( Adc* pAdc, uint32_t dwMcK ) ;
-extern uint32_t ADC_GetConvertedData( Adc* pAdc, uint32_t dwChannel ) ;
-extern void ADC_SetTsAverage(Adc* pADC, uint32_t dwAvg2Conv);
-extern uint32_t ADC_GetTsXPosition(Adc *pADC);
-extern uint32_t ADC_GetTsYPosition(Adc *pADC);
-extern uint32_t ADC_GetTsPressure(Adc *pADC);
-extern void ADC_SetTsDebounce(Adc *pADC, uint32_t dwTime);
-extern void ADC_SetTsPenDetect(Adc* pADC, uint8_t bEnDis);
-extern void ADC_SetStartupTime( Adc *pAdc, uint32_t dwUs );
-extern void ADC_SetTrackingTime( Adc *pAdc, uint32_t dwNs );
-extern void ADC_SetTriggerPeriod(Adc *pAdc, uint32_t dwPeriod);
-extern void ADC_SetTsMode(Adc* pADC, uint32_t dwMode);
-extern void ADC_TsCalibration( Adc *pAdc );
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* #ifndef _ADC_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/aes.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/aes.h
deleted file mode 100644
index d028b3ee..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/aes.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2013, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _AES_
-#define _AES_
-
-/*------------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "chip.h"
-
-
-/*----------------------------------------------------------------------------*/
-/* Definition */
-/*----------------------------------------------------------------------------*/
-#define AES_MR_CIPHER_ENCRYPT 1
-#define AES_MR_CIPHER_DECRYPT 0
-/*----------------------------------------------------------------------------*/
-/* Exported functions */
-/*----------------------------------------------------------------------------*/
-
-extern void AES_Start(void);
-extern void AES_SoftReset(void);
-extern void AES_Recount(void);
-extern void AES_Configure(uint32_t mode);
-extern void AES_EnableIt(uint32_t sources);
-extern void AES_DisableIt(uint32_t sources);
-extern uint32_t AES_GetStatus(void);
-extern void AES_WriteKey(const uint32_t *pKey, uint32_t keyLength);
-extern void AES_SetInput(uint32_t *data);
-extern void AES_GetOutput(uint32_t *data);
-extern void AES_SetVector(const uint32_t *pVector);
-extern void AES_SetAadLen(uint32_t len);
-extern void AES_SetDataLen(uint32_t len);
-extern void AES_SetGcmHash(uint32_t * hash);
-extern void AES_GetGcmTag(uint32_t * tag);
-extern void AES_GetGcmCounter(uint32_t * counter);
-extern void AES_GetGcmH(uint32_t *h);
-
-
-#endif /* #ifndef _AES_ */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afe_dma.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afe_dma.h
deleted file mode 100644
index d769900c..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afe_dma.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Interface for configuration the Analog-to-Digital Converter (AFEC) peripheral.
- *
- * \section Usage
- *
- * -# Configurate the pins for AFEC.
- * -# Initialize the AFEC with AFEC_Initialize().
- * -# Set AFEC clock and timing with AFEC_SetClock() and AFEC_SetTiming().
- * -# Select the active channel using AFEC_EnableChannel().
- * -# Start the conversion with AFEC_StartConversion().
- * -# Wait the end of the conversion by polling status with AFEC_GetStatus().
- * -# Finally, get the converted data using AFEC_GetConvertedData() or
- * AFEC_GetLastConvertedData().
- *
-*/
-#ifndef _AFE_DMA_
-#define _AFE_DMA_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "chip.h"
-
-
-/*----------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-
-/** AFE transfer complete callback. */
-typedef void (*AfeCallback)( uint8_t, void* ) ;
-
-/** \brief Spi Transfer Request prepared by the application upper layer.
- *
- * This structure is sent to the AFE_SendCommand function to start the transfer.
- * At the end of the transfer, the callback is invoked by the interrupt handler.
- */
-typedef struct
-{
- /** Pointer to the Rx data. */
- uint32_t *pRxBuff;
- /** Rx size in bytes. */
- uint16_t RxSize;
- /** Callback function invoked at the end of transfer. */
- AfeCallback callback;
- /** Callback arguments. */
- void *pArgument;
-} AfeCmd ;
-
-
-/** Constant structure associated with AFE port. This structure prevents
- client applications to have access in the same time. */
-typedef struct
-{
- /** Pointer to AFE Hardware registers */
- Afec* pAfeHw ;
- /** Current SpiCommand being processed */
- AfeCmd *pCurrentCommand ;
- /** Pointer to DMA driver */
- sXdmad* pXdmad;
- /** AFEC Id as defined in the product datasheet */
- uint8_t afeId ;
- /** Mutual exclusion semaphore. */
- volatile int8_t semaphore ;
-} AfeDma;
-
-
-/*------------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-#define AFE_OK 0
-#define AFE_ERROR 1
-#define AFE_ERROR_LOCK 2
-/*------------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-extern uint32_t Afe_ConfigureDma( AfeDma *pAfed ,
- Afec *pAfeHw ,
- uint8_t AfeId,
- sXdmad *pXdmad );
-extern uint32_t Afe_SendData( AfeDma *pAfed, AfeCmd *pCommand);
-
-
-#endif /* #ifndef _AFE_DMA_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afec.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afec.h
deleted file mode 100644
index ee9a1646..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/afec.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-/**
- * \file
- *
- * \section Purpose
- *
- * Interface for configuration the Analog-to-Digital Converter (AFEC) peripheral.
- *
- * \section Usage
- *
- * -# Configurate the pins for AFEC.
- * -# Initialize the AFEC with AFEC_Initialize().
- * -# Set AFEC clock and timing with AFEC_SetClock() and AFEC_SetTiming().
- * -# Select the active channel using AFEC_EnableChannel().
- * -# Start the conversion with AFEC_StartConversion().
- * -# Wait the end of the conversion by polling status with AFEC_GetStatus().
- * -# Finally, get the converted data using AFEC_GetConvertedData() or
- * AFEC_GetLastConvertedData().
- *
-*/
-#ifndef _AFEC_
-#define _AFEC_
-
-/*----------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-#include
-#include
-
-/*------------------------------------------------------------------------------
- * Definitions
- *------------------------------------------------------------------------------*/
-
-/* -------- AFEC_MR : (AFEC Offset: 0x04) AFEC Mode Register -------- */
-#define AFEC_MR_SETTLING_Pos 20
-#define AFEC_MR_SETTLING_Msk (0x3u << AFEC_MR_SETTLING_Pos)
-/**< \brief (AFEC_MR) Trigger Selection */
-#define AFEC_MR_SETTLING_AST3 (0x0u << 20)
-/**< \brief (AFEC_MR) ADC_SETTLING_AST3 3 periods of AFEClock */
-#define AFEC_MR_SETTLING_AST5 (0x1u << 20)
-/**< \brief (AFEC_MR) ADC_SETTLING_AST5 5 periods of AFEClock */
-#define AFEC_MR_SETTLING_AST9 (0x2u << 20)
-/**< \brief (AFEC_MR) ADC_SETTLING_AST9 9 periods of AFEClock*/
-#define AFEC_MR_SETTLING_AST17 (0x3u << 20)
-/**< \brief (AFEC_MR) ADC_SETTLING_AST17 17 periods of AFEClock*/
-
-/***************************** Single Trigger Mode ****************************/
-#define AFEC_EMR_STM_Pos 25
-#define AFEC_EMR_STM_Msk (0x1u << AFEC_EMR_STM_Pos)
-/**< \brief (AFEC_EMR) Single Trigger Mode */
-#define AFEC_EMR_STM_MULTI_TRIG (0x0u << 25)
-/**< \brief (AFEC_EMR) Single Trigger Mode: Multiple triggers are required to
- get an averaged result. */
-#define AFEC_EMR_STM_SINGLE_TRIG (0x1u << 25)
-/**< \brief (AFEC_EMR) Single Trigger Mode: Only a Single Trigger is required
- to get an averaged value. */
-
-/***************************** TAG of the AFEC_LDCR Register ******************/
-#define AFEC_EMR_TAG_Pos 24
-#define AFEC_EMR_TAG_Msk (0x1u << AFEC_EMR_TAG_Pos)
-/**< \brief (AFEC_EMR) TAG of the AFEC_LDCR Register */
-#define AFEC_EMR_TAG_CHNB_ZERO (0x0u << 24)
-/**< \brief (AFEC_EMR) TAG of the AFEC_LDCR Register: Sets CHNB to zero
-in AFEC_LDCR. */
-#define AFEC_EMR_TAG_APPENDS (0x1u << 24)
-/**< \brief (AFEC_EMR) TAG of the AFEC_LDCR Register: Appends the channel
-number to the conversion result in AFEC_LDCR register. */
-
-/***************************** Compare All Channels ******************/
-#define AFEC_EMR_CMPALL_Pos 9
-#define AFEC_EMR_CMPALL_Msk (0x1u << AFEC_EMR_TAG_Pos)
-/**< \brief (AFEC_EMR) Compare All Channels */
-#define AFEC_EMR_CMPALL_ONE_CHANNEL_COMP (0x0u << 9)
-/**< \brief (AFEC_EMR) Compare All Channels: Only channel indicated in
-CMPSEL field is compared. */
-#define AFEC_EMR_CMPALL_ALL_CHANNELS_COMP (0x1u << 9)
-/**< \brief (AFEC_EMR) Compare All Channels: All channels are compared. */
-
-#define AFEC_ACR_PGA0_ON (0x1u << 2)
-#define AFEC_ACR_PGA1_ON (0x1u << 3)
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-/*------------------------------------------------------------------------------
- * Macros function of register access
- *------------------------------------------------------------------------------*/
-
-#define AFEC_GetModeReg( pAFEC ) ((pAFEC)->AFEC_MR)
-#define AFEC_SetModeReg( pAFEC, mode ) ((pAFEC)->AFEC_MR = mode)
-
-#define AFEC_GetExtModeReg( pAFEC ) ((pAFEC)->AFEC_EMR)
-#define AFEC_SetExtModeReg( pAFEC, mode ) ((pAFEC)->AFEC_EMR = mode)
-
-#define AFEC_StartConversion( pAFEC ) ((pAFEC)->AFEC_CR = AFEC_CR_START)
-
-#define AFEC_EnableChannel( pAFEC, dwChannel ) {\
- (pAFEC)->AFEC_CHER = (1 << (dwChannel));\
- }
-
-#define AFEC_DisableChannel(pAFEC, dwChannel) {\
- (pAFEC)->AFEC_CHDR = (1 << (dwChannel));\
- }
-
-#define AFEC_EnableIt(pAFEC, dwMode) {\
- (pAFEC)->AFEC_IER = (dwMode);\
- }
-
-#define AFEC_DisableIt(pAFEC, dwMode) {\
- (pAFEC)->AFEC_IDR = (dwMode);\
- }
-
-#define AFEC_SetChannelGain(pAFEC,dwMode) {\
- (pAFEC)->AFEC_CGR = dwMode;\
- }
-
-#define AFEC_EnableDataReadyIt(pAFEC) ((pAFEC)->AFEC_IER = AFEC_IER_DRDY)
-
-#define AFEC_GetStatus(pAFEC) ((pAFEC)->AFEC_ISR)
-
-#define AFEC_GetCompareMode(pAFEC) (((pAFEC)->AFEC_EMR)& (AFEC_EMR_CMPMODE_Msk))
-
-#define AFEC_GetChannelStatus(pAFEC) ((pAFEC)->AFEC_CHSR)
-
-#define AFEC_GetInterruptMaskStatus(pAFEC) ((pAFEC)->AFEC_IMR)
-
-#define AFEC_GetLastConvertedData(pAFEC) ((pAFEC)->AFEC_LCDR)
-
-/*------------------------------------------------------------------------------
- * Exported functions
- *------------------------------------------------------------------------------*/
-extern void AFEC_Initialize( Afec* pAFEC, uint32_t dwId );
-extern uint32_t AFEC_SetClock( Afec* pAFEC, uint32_t dwPres, uint32_t dwMck );
-extern void AFEC_SetTiming( Afec* pAFEC, uint32_t dwStartup, uint32_t dwTracking,
- uint32_t dwSettling );
-extern void AFEC_SetTrigger( Afec* pAFEC, uint32_t dwTrgSel );
-extern void AFEC_SetAnalogChange( Afec* pAFE, uint8_t bEnDis );
-extern void AFEC_SetSleepMode( Afec* pAFEC, uint8_t bEnDis );
-extern void AFEC_SetFastWakeup( Afec* pAFEC, uint8_t bEnDis );
-extern void AFEC_SetSequenceMode( Afec* pAFEC, uint8_t bEnDis );
-extern void AFEC_SetSequence( Afec* pAFEC, uint32_t dwSEQ1, uint32_t dwSEQ2 );
-extern void AFEC_SetSequenceByList( Afec* pAFEC, uint8_t ucChList[], uint8_t ucNumCh );
-extern void AFEC_SetTagEnable( Afec* pAFEC, uint8_t bEnDis );
-extern void AFEC_SetCompareChannel( Afec* pAFEC, uint32_t dwChannel ) ;
-extern void AFEC_SetCompareMode( Afec* pAFEC, uint32_t dwMode ) ;
-extern void AFEC_SetComparisonWindow( Afec* pAFEC, uint32_t dwHi_Lo ) ;
-extern uint8_t AFEC_CheckConfiguration( Afec* pAFEC, uint32_t dwMcK ) ;
-extern uint32_t AFEC_GetConvertedData( Afec* pAFEC, uint32_t dwChannel ) ;
-extern void AFEC_SetStartupTime( Afec* pAFEC, uint32_t dwUs );
-extern void AFEC_SetTrackingTime( Afec* pAFEC, uint32_t dwNs );
-extern void AFEC_SetAnalogOffset( Afec *pAFE, uint32_t dwChannel,uint32_t aoffset );
-extern void AFEC_SetAnalogControl( Afec *pAFE, uint32_t control);
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* #ifndef _AFEC_ */
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_common_tables.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_common_tables.h
deleted file mode 100644
index 039cc3d6..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_common_tables.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* ----------------------------------------------------------------------
-* Copyright (C) 2010-2014 ARM Limited. All rights reserved.
-*
-* $Date: 19. March 2015
-* $Revision: V.1.4.5
-*
-* Project: CMSIS DSP Library
-* Title: arm_common_tables.h
-*
-* Description: This file has extern declaration for common tables like Bitreverse, reciprocal etc which are used across different functions
-*
-* Target Processor: Cortex-M4/Cortex-M3
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of ARM LIMITED nor the names of its contributors
-* may be used to endorse or promote products derived from this
-* software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-* -------------------------------------------------------------------- */
-
-#ifndef _ARM_COMMON_TABLES_H
-#define _ARM_COMMON_TABLES_H
-
-#include "arm_math.h"
-
-extern const uint16_t armBitRevTable[1024];
-extern const q15_t armRecipTableQ15[64];
-extern const q31_t armRecipTableQ31[64];
-//extern const q31_t realCoefAQ31[1024];
-//extern const q31_t realCoefBQ31[1024];
-extern const float32_t twiddleCoef_16[32];
-extern const float32_t twiddleCoef_32[64];
-extern const float32_t twiddleCoef_64[128];
-extern const float32_t twiddleCoef_128[256];
-extern const float32_t twiddleCoef_256[512];
-extern const float32_t twiddleCoef_512[1024];
-extern const float32_t twiddleCoef_1024[2048];
-extern const float32_t twiddleCoef_2048[4096];
-extern const float32_t twiddleCoef_4096[8192];
-#define twiddleCoef twiddleCoef_4096
-extern const q31_t twiddleCoef_16_q31[24];
-extern const q31_t twiddleCoef_32_q31[48];
-extern const q31_t twiddleCoef_64_q31[96];
-extern const q31_t twiddleCoef_128_q31[192];
-extern const q31_t twiddleCoef_256_q31[384];
-extern const q31_t twiddleCoef_512_q31[768];
-extern const q31_t twiddleCoef_1024_q31[1536];
-extern const q31_t twiddleCoef_2048_q31[3072];
-extern const q31_t twiddleCoef_4096_q31[6144];
-extern const q15_t twiddleCoef_16_q15[24];
-extern const q15_t twiddleCoef_32_q15[48];
-extern const q15_t twiddleCoef_64_q15[96];
-extern const q15_t twiddleCoef_128_q15[192];
-extern const q15_t twiddleCoef_256_q15[384];
-extern const q15_t twiddleCoef_512_q15[768];
-extern const q15_t twiddleCoef_1024_q15[1536];
-extern const q15_t twiddleCoef_2048_q15[3072];
-extern const q15_t twiddleCoef_4096_q15[6144];
-extern const float32_t twiddleCoef_rfft_32[32];
-extern const float32_t twiddleCoef_rfft_64[64];
-extern const float32_t twiddleCoef_rfft_128[128];
-extern const float32_t twiddleCoef_rfft_256[256];
-extern const float32_t twiddleCoef_rfft_512[512];
-extern const float32_t twiddleCoef_rfft_1024[1024];
-extern const float32_t twiddleCoef_rfft_2048[2048];
-extern const float32_t twiddleCoef_rfft_4096[4096];
-
-
-/* floating-point bit reversal tables */
-#define ARMBITREVINDEXTABLE__16_TABLE_LENGTH ((uint16_t)20 )
-#define ARMBITREVINDEXTABLE__32_TABLE_LENGTH ((uint16_t)48 )
-#define ARMBITREVINDEXTABLE__64_TABLE_LENGTH ((uint16_t)56 )
-#define ARMBITREVINDEXTABLE_128_TABLE_LENGTH ((uint16_t)208 )
-#define ARMBITREVINDEXTABLE_256_TABLE_LENGTH ((uint16_t)440 )
-#define ARMBITREVINDEXTABLE_512_TABLE_LENGTH ((uint16_t)448 )
-#define ARMBITREVINDEXTABLE1024_TABLE_LENGTH ((uint16_t)1800)
-#define ARMBITREVINDEXTABLE2048_TABLE_LENGTH ((uint16_t)3808)
-#define ARMBITREVINDEXTABLE4096_TABLE_LENGTH ((uint16_t)4032)
-
-extern const uint16_t armBitRevIndexTable16[ARMBITREVINDEXTABLE__16_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable32[ARMBITREVINDEXTABLE__32_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable64[ARMBITREVINDEXTABLE__64_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable128[ARMBITREVINDEXTABLE_128_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable256[ARMBITREVINDEXTABLE_256_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable512[ARMBITREVINDEXTABLE_512_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable1024[ARMBITREVINDEXTABLE1024_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable2048[ARMBITREVINDEXTABLE2048_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable4096[ARMBITREVINDEXTABLE4096_TABLE_LENGTH];
-
-/* fixed-point bit reversal tables */
-#define ARMBITREVINDEXTABLE_FIXED___16_TABLE_LENGTH ((uint16_t)12 )
-#define ARMBITREVINDEXTABLE_FIXED___32_TABLE_LENGTH ((uint16_t)24 )
-#define ARMBITREVINDEXTABLE_FIXED___64_TABLE_LENGTH ((uint16_t)56 )
-#define ARMBITREVINDEXTABLE_FIXED__128_TABLE_LENGTH ((uint16_t)112 )
-#define ARMBITREVINDEXTABLE_FIXED__256_TABLE_LENGTH ((uint16_t)240 )
-#define ARMBITREVINDEXTABLE_FIXED__512_TABLE_LENGTH ((uint16_t)480 )
-#define ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH ((uint16_t)992 )
-#define ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH ((uint16_t)1984)
-#define ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH ((uint16_t)4032)
-
-extern const uint16_t armBitRevIndexTable_fixed_16[ARMBITREVINDEXTABLE_FIXED___16_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_32[ARMBITREVINDEXTABLE_FIXED___32_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_64[ARMBITREVINDEXTABLE_FIXED___64_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_128[ARMBITREVINDEXTABLE_FIXED__128_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_256[ARMBITREVINDEXTABLE_FIXED__256_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_512[ARMBITREVINDEXTABLE_FIXED__512_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_1024[ARMBITREVINDEXTABLE_FIXED_1024_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_2048[ARMBITREVINDEXTABLE_FIXED_2048_TABLE_LENGTH];
-extern const uint16_t armBitRevIndexTable_fixed_4096[ARMBITREVINDEXTABLE_FIXED_4096_TABLE_LENGTH];
-
-/* Tables for Fast Math Sine and Cosine */
-extern const float32_t sinTable_f32[FAST_MATH_TABLE_SIZE + 1];
-extern const q31_t sinTable_q31[FAST_MATH_TABLE_SIZE + 1];
-extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1];
-
-#endif /* ARM_COMMON_TABLES_H */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_const_structs.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_const_structs.h
deleted file mode 100644
index 726d06eb..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_const_structs.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* ----------------------------------------------------------------------
-* Copyright (C) 2010-2014 ARM Limited. All rights reserved.
-*
-* $Date: 19. March 2015
-* $Revision: V.1.4.5
-*
-* Project: CMSIS DSP Library
-* Title: arm_const_structs.h
-*
-* Description: This file has constant structs that are initialized for
-* user convenience. For example, some can be given as
-* arguments to the arm_cfft_f32() function.
-*
-* Target Processor: Cortex-M4/Cortex-M3
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of ARM LIMITED nor the names of its contributors
-* may be used to endorse or promote products derived from this
-* software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
-* -------------------------------------------------------------------- */
-
-#ifndef _ARM_CONST_STRUCTS_H
-#define _ARM_CONST_STRUCTS_H
-
-#include "arm_math.h"
-#include "arm_common_tables.h"
-
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len16;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len32;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len64;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len128;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len256;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len512;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len1024;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len2048;
- extern const arm_cfft_instance_f32 arm_cfft_sR_f32_len4096;
-
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len16;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len32;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len64;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len128;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len256;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len512;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len1024;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len2048;
- extern const arm_cfft_instance_q31 arm_cfft_sR_q31_len4096;
-
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len16;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len32;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len64;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len128;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len256;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len512;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len1024;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len2048;
- extern const arm_cfft_instance_q15 arm_cfft_sR_q15_len4096;
-
-#endif
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_math.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_math.h
deleted file mode 100644
index e4b2f62e..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/cmsis/CMSIS/Include/arm_math.h
+++ /dev/null
@@ -1,7556 +0,0 @@
-/* ----------------------------------------------------------------------
-* Copyright (C) 2010-2015 ARM Limited. All rights reserved.
-*
-* $Date: 19. March 2015
-* $Revision: V.1.4.5
-*
-* Project: CMSIS DSP Library
-* Title: arm_math.h
-*
-* Description: Public header file for CMSIS DSP Library
-*
-* Target Processor: Cortex-M7/Cortex-M4/Cortex-M3/Cortex-M0
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-* - Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* - Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in
-* the documentation and/or other materials provided with the
-* distribution.
-* - Neither the name of ARM LIMITED nor the names of its contributors
-* may be used to endorse or promote products derived from this
-* software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-* POSSIBILITY OF SUCH DAMAGE.
- * -------------------------------------------------------------------- */
-
-/**
- \mainpage CMSIS DSP Software Library
- *
- * Introduction
- * ------------
- *
- * This user manual describes the CMSIS DSP software library,
- * a suite of common signal processing functions for use on Cortex-M processor based devices.
- *
- * The library is divided into a number of functions each covering a specific category:
- * - Basic math functions
- * - Fast math functions
- * - Complex math functions
- * - Filters
- * - Matrix functions
- * - Transforms
- * - Motor control functions
- * - Statistical functions
- * - Support functions
- * - Interpolation functions
- *
- * The library has separate functions for operating on 8-bit integers, 16-bit integers,
- * 32-bit integer and 32-bit floating-point values.
- *
- * Using the Library
- * ------------
- *
- * The library installer contains prebuilt versions of the libraries in the Lib folder.
- * - arm_cortexM7lfdp_math.lib (Little endian and Double Precision Floating Point Unit on Cortex-M7)
- * - arm_cortexM7bfdp_math.lib (Big endian and Double Precision Floating Point Unit on Cortex-M7)
- * - arm_cortexM7lfsp_math.lib (Little endian and Single Precision Floating Point Unit on Cortex-M7)
- * - arm_cortexM7bfsp_math.lib (Big endian and Single Precision Floating Point Unit on Cortex-M7)
- * - arm_cortexM7l_math.lib (Little endian on Cortex-M7)
- * - arm_cortexM7b_math.lib (Big endian on Cortex-M7)
- * - arm_cortexM4lf_math.lib (Little endian and Floating Point Unit on Cortex-M4)
- * - arm_cortexM4bf_math.lib (Big endian and Floating Point Unit on Cortex-M4)
- * - arm_cortexM4l_math.lib (Little endian on Cortex-M4)
- * - arm_cortexM4b_math.lib (Big endian on Cortex-M4)
- * - arm_cortexM3l_math.lib (Little endian on Cortex-M3)
- * - arm_cortexM3b_math.lib (Big endian on Cortex-M3)
- * - arm_cortexM0l_math.lib (Little endian on Cortex-M0 / CortexM0+)
- * - arm_cortexM0b_math.lib (Big endian on Cortex-M0 / CortexM0+)
- *
- * The library functions are declared in the public file arm_math.h which is placed in the Include folder.
- * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single
- * public header file arm_math.h for Cortex-M7/M4/M3/M0/M0+ with little endian and big endian. Same header file will be used for floating point unit(FPU) variants.
- * Define the appropriate pre processor MACRO ARM_MATH_CM7 or ARM_MATH_CM4 or ARM_MATH_CM3 or
- * ARM_MATH_CM0 or ARM_MATH_CM0PLUS depending on the target processor in the application.
- *
- * Examples
- * --------
- *
- * The library ships with a number of examples which demonstrate how to use the library functions.
- *
- * Toolchain Support
- * ------------
- *
- * The library has been developed and tested with MDK-ARM version 5.14.0.0
- * The library is being tested in GCC and IAR toolchains and updates on this activity will be made available shortly.
- *
- * Building the Library
- * ------------
- *
- * The library installer contains a project file to re build libraries on MDK-ARM Tool chain in the CMSIS\\DSP_Lib\\Source\\ARM folder.
- * - arm_cortexM_math.uvprojx
- *
- *
- * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional pre processor MACROs detailed above.
- *
- * Pre-processor Macros
- * ------------
- *
- * Each library project have differant pre-processor macros.
- *
- * - UNALIGNED_SUPPORT_DISABLE:
- *
- * Define macro UNALIGNED_SUPPORT_DISABLE, If the silicon does not support unaligned memory access
- *
- * - ARM_MATH_BIG_ENDIAN:
- *
- * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets.
- *
- * - ARM_MATH_MATRIX_CHECK:
- *
- * Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices
- *
- * - ARM_MATH_ROUNDING:
- *
- * Define macro ARM_MATH_ROUNDING for rounding on support functions
- *
- * - ARM_MATH_CMx:
- *
- * Define macro ARM_MATH_CM4 for building the library on Cortex-M4 target, ARM_MATH_CM3 for building library on Cortex-M3 target
- * and ARM_MATH_CM0 for building library on Cortex-M0 target, ARM_MATH_CM0PLUS for building library on Cortex-M0+ target, and
- * ARM_MATH_CM7 for building the library on cortex-M7.
- *
- * - __FPU_PRESENT:
- *
- * Initialize macro __FPU_PRESENT = 1 when building on FPU supported Targets. Enable this macro for M4bf and M4lf libraries
- *
- *
- * CMSIS-DSP in ARM::CMSIS Pack
- * -----------------------------
- *
- * The following files relevant to CMSIS-DSP are present in the ARM::CMSIS Pack directories:
- * |File/Folder |Content |
- * |------------------------------|------------------------------------------------------------------------|
- * |\b CMSIS\\Documentation\\DSP | This documentation |
- * |\b CMSIS\\DSP_Lib | Software license agreement (license.txt) |
- * |\b CMSIS\\DSP_Lib\\Examples | Example projects demonstrating the usage of the library functions |
- * |\b CMSIS\\DSP_Lib\\Source | Source files for rebuilding the library |
- *
- *
- * Revision History of CMSIS-DSP
- * ------------
- * Please refer to \ref ChangeLog_pg.
- *
- * Copyright Notice
- * ------------
- *
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
- */
-
-
-/**
- * @defgroup groupMath Basic Math Functions
- */
-
-/**
- * @defgroup groupFastMath Fast Math Functions
- * This set of functions provides a fast approximation to sine, cosine, and square root.
- * As compared to most of the other functions in the CMSIS math library, the fast math functions
- * operate on individual values and not arrays.
- * There are separate functions for Q15, Q31, and floating-point data.
- *
- */
-
-/**
- * @defgroup groupCmplxMath Complex Math Functions
- * This set of functions operates on complex data vectors.
- * The data in the complex arrays is stored in an interleaved fashion
- * (real, imag, real, imag, ...).
- * In the API functions, the number of samples in a complex array refers
- * to the number of complex values; the array contains twice this number of
- * real values.
- */
-
-/**
- * @defgroup groupFilters Filtering Functions
- */
-
-/**
- * @defgroup groupMatrix Matrix Functions
- *
- * This set of functions provides basic matrix math operations.
- * The functions operate on matrix data structures. For example,
- * the type
- * definition for the floating-point matrix structure is shown
- * below:
- *
- * typedef struct
- * {
- * uint16_t numRows; // number of rows of the matrix.
- * uint16_t numCols; // number of columns of the matrix.
- * float32_t *pData; // points to the data of the matrix.
- * } arm_matrix_instance_f32;
- *
- * There are similar definitions for Q15 and Q31 data types.
- *
- * The structure specifies the size of the matrix and then points to
- * an array of data. The array is of size numRows X numCols
- * and the values are arranged in row order. That is, the
- * matrix element (i, j) is stored at:
- *
- * pData[i*numCols + j]
- *
- *
- * \par Init Functions
- * There is an associated initialization function for each type of matrix
- * data structure.
- * The initialization function sets the values of the internal structure fields.
- * Refer to the function arm_mat_init_f32(), arm_mat_init_q31()
- * and arm_mat_init_q15() for floating-point, Q31 and Q15 types, respectively.
- *
- * \par
- * Use of the initialization function is optional. However, if initialization function is used
- * then the instance structure cannot be placed into a const data section.
- * To place the instance structure in a const data
- * section, manually initialize the data structure. For example:
- *
- * arm_matrix_instance_f32 S = {nRows, nColumns, pData};
- * arm_matrix_instance_q31 S = {nRows, nColumns, pData};
- * arm_matrix_instance_q15 S = {nRows, nColumns, pData};
- *
- * where nRows specifies the number of rows, nColumns
- * specifies the number of columns, and pData points to the
- * data array.
- *
- * \par Size Checking
- * By default all of the matrix functions perform size checking on the input and
- * output matrices. For example, the matrix addition function verifies that the
- * two input matrices and the output matrix all have the same number of rows and
- * columns. If the size check fails the functions return:
- *
- * ARM_MATH_SIZE_MISMATCH
- *
- * Otherwise the functions return
- *
- * ARM_MATH_SUCCESS
- *
- * There is some overhead associated with this matrix size checking.
- * The matrix size checking is enabled via the \#define
- *
- * ARM_MATH_MATRIX_CHECK
- *
- * within the library project settings. By default this macro is defined
- * and size checking is enabled. By changing the project settings and
- * undefining this macro size checking is eliminated and the functions
- * run a bit faster. With size checking disabled the functions always
- * return ARM_MATH_SUCCESS.
- */
-
-/**
- * @defgroup groupTransforms Transform Functions
- */
-
-/**
- * @defgroup groupController Controller Functions
- */
-
-/**
- * @defgroup groupStats Statistics Functions
- */
-/**
- * @defgroup groupSupport Support Functions
- */
-
-/**
- * @defgroup groupInterpolation Interpolation Functions
- * These functions perform 1- and 2-dimensional interpolation of data.
- * Linear interpolation is used for 1-dimensional data and
- * bilinear interpolation is used for 2-dimensional data.
- */
-
-/**
- * @defgroup groupExamples Examples
- */
-#ifndef _ARM_MATH_H
-#define _ARM_MATH_H
-
-#define __CMSIS_GENERIC /* disable NVIC and Systick functions */
-
-#if defined(ARM_MATH_CM7)
- #include "core_cm7.h"
-#elif defined (ARM_MATH_CM4)
- #include "core_cm4.h"
-#elif defined (ARM_MATH_CM3)
- #include "core_cm3.h"
-#elif defined (ARM_MATH_CM0)
- #include "core_cm0.h"
-#define ARM_MATH_CM0_FAMILY
- #elif defined (ARM_MATH_CM0PLUS)
-#include "core_cm0plus.h"
- #define ARM_MATH_CM0_FAMILY
-#else
- #error "Define according the used Cortex core ARM_MATH_CM7, ARM_MATH_CM4, ARM_MATH_CM3, ARM_MATH_CM0PLUS or ARM_MATH_CM0"
-#endif
-
-#undef __CMSIS_GENERIC /* enable NVIC and Systick functions */
-#include "string.h"
-#include "math.h"
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
- /**
- * @brief Macros required for reciprocal calculation in Normalized LMS
- */
-
-#define DELTA_Q31 (0x100)
-#define DELTA_Q15 0x5
-#define INDEX_MASK 0x0000003F
-#ifndef PI
-#define PI 3.14159265358979f
-#endif
-
- /**
- * @brief Macros required for SINE and COSINE Fast math approximations
- */
-
-#define FAST_MATH_TABLE_SIZE 512
-#define FAST_MATH_Q31_SHIFT (32 - 10)
-#define FAST_MATH_Q15_SHIFT (16 - 10)
-#define CONTROLLER_Q31_SHIFT (32 - 9)
-#define TABLE_SIZE 256
-#define TABLE_SPACING_Q31 0x400000
-#define TABLE_SPACING_Q15 0x80
-
- /**
- * @brief Macros required for SINE and COSINE Controller functions
- */
- /* 1.31(q31) Fixed value of 2/360 */
- /* -1 to +1 is divided into 360 values so total spacing is (2/360) */
-#define INPUT_SPACING 0xB60B61
-
- /**
- * @brief Macro for Unaligned Support
- */
-#ifndef UNALIGNED_SUPPORT_DISABLE
- #define ALIGN4
-#else
- #if defined (__GNUC__)
- #define ALIGN4 __attribute__((aligned(4)))
- #else
- #define ALIGN4 __align(4)
- #endif
-#endif /* #ifndef UNALIGNED_SUPPORT_DISABLE */
-
- /**
- * @brief Error status returned by some functions in the library.
- */
-
- typedef enum
- {
- ARM_MATH_SUCCESS = 0, /**< No error */
- ARM_MATH_ARGUMENT_ERROR = -1, /**< One or more arguments are incorrect */
- ARM_MATH_LENGTH_ERROR = -2, /**< Length of data buffer is incorrect */
- ARM_MATH_SIZE_MISMATCH = -3, /**< Size of matrices is not compatible with the operation. */
- ARM_MATH_NANINF = -4, /**< Not-a-number (NaN) or infinity is generated */
- ARM_MATH_SINGULAR = -5, /**< Generated by matrix inversion if the input matrix is singular and cannot be inverted. */
- ARM_MATH_TEST_FAILURE = -6 /**< Test Failed */
- } arm_status;
-
- /**
- * @brief 8-bit fractional data type in 1.7 format.
- */
- typedef int8_t q7_t;
-
- /**
- * @brief 16-bit fractional data type in 1.15 format.
- */
- typedef int16_t q15_t;
-
- /**
- * @brief 32-bit fractional data type in 1.31 format.
- */
- typedef int32_t q31_t;
-
- /**
- * @brief 64-bit fractional data type in 1.63 format.
- */
- typedef int64_t q63_t;
-
- /**
- * @brief 32-bit floating-point type definition.
- */
- typedef float float32_t;
-
- /**
- * @brief 64-bit floating-point type definition.
- */
- typedef double float64_t;
-
- /**
- * @brief definition to read/write two 16 bit values.
- */
-#if defined __CC_ARM
- #define __SIMD32_TYPE int32_t __packed
- #define CMSIS_UNUSED __attribute__((unused))
-#elif defined __ICCARM__
- #define __SIMD32_TYPE int32_t __packed
- #define CMSIS_UNUSED
-#elif defined __GNUC__
- #define __SIMD32_TYPE int32_t
- #define CMSIS_UNUSED __attribute__((unused))
-#elif defined __CSMC__ /* Cosmic */
- #define __SIMD32_TYPE int32_t
- #define CMSIS_UNUSED
-#elif defined __TASKING__
- #define __SIMD32_TYPE __unaligned int32_t
- #define CMSIS_UNUSED
-#else
- #error Unknown compiler
-#endif
-
-#define __SIMD32(addr) (*(__SIMD32_TYPE **) & (addr))
-#define __SIMD32_CONST(addr) ((__SIMD32_TYPE *)(addr))
-
-#define _SIMD32_OFFSET(addr) (*(__SIMD32_TYPE *) (addr))
-
-#define __SIMD64(addr) (*(int64_t **) & (addr))
-
-#if defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY)
- /**
- * @brief definition to pack two 16 bit values.
- */
-#define __PKHBT(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0x0000FFFF) | \
- (((int32_t)(ARG2) << ARG3) & (int32_t)0xFFFF0000) )
-#define __PKHTB(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0xFFFF0000) | \
- (((int32_t)(ARG2) >> ARG3) & (int32_t)0x0000FFFF) )
-
-#endif
-
-
- /**
- * @brief definition to pack four 8 bit values.
- */
-#ifndef ARM_MATH_BIG_ENDIAN
-
-#define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v0) << 0) & (int32_t)0x000000FF) | \
- (((int32_t)(v1) << 8) & (int32_t)0x0000FF00) | \
- (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) | \
- (((int32_t)(v3) << 24) & (int32_t)0xFF000000) )
-#else
-
-#define __PACKq7(v0,v1,v2,v3) ( (((int32_t)(v3) << 0) & (int32_t)0x000000FF) | \
- (((int32_t)(v2) << 8) & (int32_t)0x0000FF00) | \
- (((int32_t)(v1) << 16) & (int32_t)0x00FF0000) | \
- (((int32_t)(v0) << 24) & (int32_t)0xFF000000) )
-
-#endif
-
-
- /**
- * @brief Clips Q63 to Q31 values.
- */
- static __INLINE q31_t clip_q63_to_q31(
- q63_t x)
- {
- return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ?
- ((0x7FFFFFFF ^ ((q31_t) (x >> 63)))) : (q31_t) x;
- }
-
- /**
- * @brief Clips Q63 to Q15 values.
- */
- static __INLINE q15_t clip_q63_to_q15(
- q63_t x)
- {
- return ((q31_t) (x >> 32) != ((q31_t) x >> 31)) ?
- ((0x7FFF ^ ((q15_t) (x >> 63)))) : (q15_t) (x >> 15);
- }
-
- /**
- * @brief Clips Q31 to Q7 values.
- */
- static __INLINE q7_t clip_q31_to_q7(
- q31_t x)
- {
- return ((q31_t) (x >> 24) != ((q31_t) x >> 23)) ?
- ((0x7F ^ ((q7_t) (x >> 31)))) : (q7_t) x;
- }
-
- /**
- * @brief Clips Q31 to Q15 values.
- */
- static __INLINE q15_t clip_q31_to_q15(
- q31_t x)
- {
- return ((q31_t) (x >> 16) != ((q31_t) x >> 15)) ?
- ((0x7FFF ^ ((q15_t) (x >> 31)))) : (q15_t) x;
- }
-
- /**
- * @brief Multiplies 32 X 64 and returns 32 bit result in 2.30 format.
- */
-
- static __INLINE q63_t mult32x64(
- q63_t x,
- q31_t y)
- {
- return ((((q63_t) (x & 0x00000000FFFFFFFF) * y) >> 32) +
- (((q63_t) (x >> 32) * y)));
- }
-
-
-//#if defined (ARM_MATH_CM0_FAMILY) && defined ( __CC_ARM )
-//#define __CLZ __clz
-//#endif
-
-//note: function can be removed when all toolchain support __CLZ for Cortex-M0
-#if defined (ARM_MATH_CM0_FAMILY) && ((defined (__ICCARM__)) )
-
- static __INLINE uint32_t __CLZ(
- q31_t data);
-
-
- static __INLINE uint32_t __CLZ(
- q31_t data)
- {
- uint32_t count = 0;
- uint32_t mask = 0x80000000;
-
- while((data & mask) == 0)
- {
- count += 1u;
- mask = mask >> 1u;
- }
-
- return (count);
-
- }
-
-#endif
-
- /**
- * @brief Function to Calculates 1/in (reciprocal) value of Q31 Data type.
- */
-
- static __INLINE uint32_t arm_recip_q31(
- q31_t in,
- q31_t * dst,
- q31_t * pRecipTable)
- {
-
- uint32_t out, tempVal;
- uint32_t index, i;
- uint32_t signBits;
-
- if(in > 0)
- {
- signBits = __CLZ(in) - 1;
- }
- else
- {
- signBits = __CLZ(-in) - 1;
- }
-
- /* Convert input sample to 1.31 format */
- in = in << signBits;
-
- /* calculation of index for initial approximated Val */
- index = (uint32_t) (in >> 24u);
- index = (index & INDEX_MASK);
-
- /* 1.31 with exp 1 */
- out = pRecipTable[index];
-
- /* calculation of reciprocal value */
- /* running approximation for two iterations */
- for (i = 0u; i < 2u; i++)
- {
- tempVal = (q31_t) (((q63_t) in * out) >> 31u);
- tempVal = 0x7FFFFFFF - tempVal;
- /* 1.31 with exp 1 */
- //out = (q31_t) (((q63_t) out * tempVal) >> 30u);
- out = (q31_t) clip_q63_to_q31(((q63_t) out * tempVal) >> 30u);
- }
-
- /* write output */
- *dst = out;
-
- /* return num of signbits of out = 1/in value */
- return (signBits + 1u);
-
- }
-
- /**
- * @brief Function to Calculates 1/in (reciprocal) value of Q15 Data type.
- */
- static __INLINE uint32_t arm_recip_q15(
- q15_t in,
- q15_t * dst,
- q15_t * pRecipTable)
- {
-
- uint32_t out = 0, tempVal = 0;
- uint32_t index = 0, i = 0;
- uint32_t signBits = 0;
-
- if(in > 0)
- {
- signBits = __CLZ(in) - 17;
- }
- else
- {
- signBits = __CLZ(-in) - 17;
- }
-
- /* Convert input sample to 1.15 format */
- in = in << signBits;
-
- /* calculation of index for initial approximated Val */
- index = in >> 8;
- index = (index & INDEX_MASK);
-
- /* 1.15 with exp 1 */
- out = pRecipTable[index];
-
- /* calculation of reciprocal value */
- /* running approximation for two iterations */
- for (i = 0; i < 2; i++)
- {
- tempVal = (q15_t) (((q31_t) in * out) >> 15);
- tempVal = 0x7FFF - tempVal;
- /* 1.15 with exp 1 */
- out = (q15_t) (((q31_t) out * tempVal) >> 14);
- }
-
- /* write output */
- *dst = out;
-
- /* return num of signbits of out = 1/in value */
- return (signBits + 1);
-
- }
-
-
- /*
- * @brief C custom defined intrinisic function for only M0 processors
- */
-#if defined(ARM_MATH_CM0_FAMILY)
-
- static __INLINE q31_t __SSAT(
- q31_t x,
- uint32_t y)
- {
- int32_t posMax, negMin;
- uint32_t i;
-
- posMax = 1;
- for (i = 0; i < (y - 1); i++)
- {
- posMax = posMax * 2;
- }
-
- if(x > 0)
- {
- posMax = (posMax - 1);
-
- if(x > posMax)
- {
- x = posMax;
- }
- }
- else
- {
- negMin = -posMax;
-
- if(x < negMin)
- {
- x = negMin;
- }
- }
- return (x);
-
-
- }
-
-#endif /* end of ARM_MATH_CM0_FAMILY */
-
-
-
- /*
- * @brief C custom defined intrinsic function for M3 and M0 processors
- */
-#if defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY)
-
- /*
- * @brief C custom defined QADD8 for M3 and M0 processors
- */
- static __INLINE q31_t __QADD8(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q7_t r, s, t, u;
-
- r = (q7_t) x;
- s = (q7_t) y;
-
- r = __SSAT((q31_t) (r + s), 8);
- s = __SSAT(((q31_t) (((x << 16) >> 24) + ((y << 16) >> 24))), 8);
- t = __SSAT(((q31_t) (((x << 8) >> 24) + ((y << 8) >> 24))), 8);
- u = __SSAT(((q31_t) ((x >> 24) + (y >> 24))), 8);
-
- sum =
- (((q31_t) u << 24) & 0xFF000000) | (((q31_t) t << 16) & 0x00FF0000) |
- (((q31_t) s << 8) & 0x0000FF00) | (r & 0x000000FF);
-
- return sum;
-
- }
-
- /*
- * @brief C custom defined QSUB8 for M3 and M0 processors
- */
- static __INLINE q31_t __QSUB8(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s, t, u;
-
- r = (q7_t) x;
- s = (q7_t) y;
-
- r = __SSAT((r - s), 8);
- s = __SSAT(((q31_t) (((x << 16) >> 24) - ((y << 16) >> 24))), 8) << 8;
- t = __SSAT(((q31_t) (((x << 8) >> 24) - ((y << 8) >> 24))), 8) << 16;
- u = __SSAT(((q31_t) ((x >> 24) - (y >> 24))), 8) << 24;
-
- sum =
- (u & 0xFF000000) | (t & 0x00FF0000) | (s & 0x0000FF00) | (r &
- 0x000000FF);
-
- return sum;
- }
-
- /*
- * @brief C custom defined QADD16 for M3 and M0 processors
- */
-
- /*
- * @brief C custom defined QADD16 for M3 and M0 processors
- */
- static __INLINE q31_t __QADD16(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = __SSAT(r + s, 16);
- s = __SSAT(((q31_t) ((x >> 16) + (y >> 16))), 16) << 16;
-
- sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return sum;
-
- }
-
- /*
- * @brief C custom defined SHADD16 for M3 and M0 processors
- */
- static __INLINE q31_t __SHADD16(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = ((r >> 1) + (s >> 1));
- s = ((q31_t) ((x >> 17) + (y >> 17))) << 16;
-
- sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return sum;
-
- }
-
- /*
- * @brief C custom defined QSUB16 for M3 and M0 processors
- */
- static __INLINE q31_t __QSUB16(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = __SSAT(r - s, 16);
- s = __SSAT(((q31_t) ((x >> 16) - (y >> 16))), 16) << 16;
-
- sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return sum;
- }
-
- /*
- * @brief C custom defined SHSUB16 for M3 and M0 processors
- */
- static __INLINE q31_t __SHSUB16(
- q31_t x,
- q31_t y)
- {
-
- q31_t diff;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = ((r >> 1) - (s >> 1));
- s = (((x >> 17) - (y >> 17)) << 16);
-
- diff = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return diff;
- }
-
- /*
- * @brief C custom defined QASX for M3 and M0 processors
- */
- static __INLINE q31_t __QASX(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum = 0;
-
- sum =
- ((sum +
- clip_q31_to_q15((q31_t) ((q15_t) (x >> 16) + (q15_t) y))) << 16) +
- clip_q31_to_q15((q31_t) ((q15_t) x - (q15_t) (y >> 16)));
-
- return sum;
- }
-
- /*
- * @brief C custom defined SHASX for M3 and M0 processors
- */
- static __INLINE q31_t __SHASX(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = ((r >> 1) - (y >> 17));
- s = (((x >> 17) + (s >> 1)) << 16);
-
- sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return sum;
- }
-
-
- /*
- * @brief C custom defined QSAX for M3 and M0 processors
- */
- static __INLINE q31_t __QSAX(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum = 0;
-
- sum =
- ((sum +
- clip_q31_to_q15((q31_t) ((q15_t) (x >> 16) - (q15_t) y))) << 16) +
- clip_q31_to_q15((q31_t) ((q15_t) x + (q15_t) (y >> 16)));
-
- return sum;
- }
-
- /*
- * @brief C custom defined SHSAX for M3 and M0 processors
- */
- static __INLINE q31_t __SHSAX(
- q31_t x,
- q31_t y)
- {
-
- q31_t sum;
- q31_t r, s;
-
- r = (q15_t) x;
- s = (q15_t) y;
-
- r = ((r >> 1) + (y >> 17));
- s = (((x >> 17) - (s >> 1)) << 16);
-
- sum = (s & 0xFFFF0000) | (r & 0x0000FFFF);
-
- return sum;
- }
-
- /*
- * @brief C custom defined SMUSDX for M3 and M0 processors
- */
- static __INLINE q31_t __SMUSDX(
- q31_t x,
- q31_t y)
- {
-
- return ((q31_t) (((q15_t) x * (q15_t) (y >> 16)) -
- ((q15_t) (x >> 16) * (q15_t) y)));
- }
-
- /*
- * @brief C custom defined SMUADX for M3 and M0 processors
- */
- static __INLINE q31_t __SMUADX(
- q31_t x,
- q31_t y)
- {
-
- return ((q31_t) (((q15_t) x * (q15_t) (y >> 16)) +
- ((q15_t) (x >> 16) * (q15_t) y)));
- }
-
- /*
- * @brief C custom defined QADD for M3 and M0 processors
- */
- static __INLINE q31_t __QADD(
- q31_t x,
- q31_t y)
- {
- return clip_q63_to_q31((q63_t) x + y);
- }
-
- /*
- * @brief C custom defined QSUB for M3 and M0 processors
- */
- static __INLINE q31_t __QSUB(
- q31_t x,
- q31_t y)
- {
- return clip_q63_to_q31((q63_t) x - y);
- }
-
- /*
- * @brief C custom defined SMLAD for M3 and M0 processors
- */
- static __INLINE q31_t __SMLAD(
- q31_t x,
- q31_t y,
- q31_t sum)
- {
-
- return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) +
- ((q15_t) x * (q15_t) y));
- }
-
- /*
- * @brief C custom defined SMLADX for M3 and M0 processors
- */
- static __INLINE q31_t __SMLADX(
- q31_t x,
- q31_t y,
- q31_t sum)
- {
-
- return (sum + ((q15_t) (x >> 16) * (q15_t) (y)) +
- ((q15_t) x * (q15_t) (y >> 16)));
- }
-
- /*
- * @brief C custom defined SMLSDX for M3 and M0 processors
- */
- static __INLINE q31_t __SMLSDX(
- q31_t x,
- q31_t y,
- q31_t sum)
- {
-
- return (sum - ((q15_t) (x >> 16) * (q15_t) (y)) +
- ((q15_t) x * (q15_t) (y >> 16)));
- }
-
- /*
- * @brief C custom defined SMLALD for M3 and M0 processors
- */
- static __INLINE q63_t __SMLALD(
- q31_t x,
- q31_t y,
- q63_t sum)
- {
-
- return (sum + ((q15_t) (x >> 16) * (q15_t) (y >> 16)) +
- ((q15_t) x * (q15_t) y));
- }
-
- /*
- * @brief C custom defined SMLALDX for M3 and M0 processors
- */
- static __INLINE q63_t __SMLALDX(
- q31_t x,
- q31_t y,
- q63_t sum)
- {
-
- return (sum + ((q15_t) (x >> 16) * (q15_t) y)) +
- ((q15_t) x * (q15_t) (y >> 16));
- }
-
- /*
- * @brief C custom defined SMUAD for M3 and M0 processors
- */
- static __INLINE q31_t __SMUAD(
- q31_t x,
- q31_t y)
- {
-
- return (((x >> 16) * (y >> 16)) +
- (((x << 16) >> 16) * ((y << 16) >> 16)));
- }
-
- /*
- * @brief C custom defined SMUSD for M3 and M0 processors
- */
- static __INLINE q31_t __SMUSD(
- q31_t x,
- q31_t y)
- {
-
- return (-((x >> 16) * (y >> 16)) +
- (((x << 16) >> 16) * ((y << 16) >> 16)));
- }
-
-
- /*
- * @brief C custom defined SXTB16 for M3 and M0 processors
- */
- static __INLINE q31_t __SXTB16(
- q31_t x)
- {
-
- return ((((x << 24) >> 24) & 0x0000FFFF) |
- (((x << 8) >> 8) & 0xFFFF0000));
- }
-
-
-#endif /* defined (ARM_MATH_CM3) || defined (ARM_MATH_CM0_FAMILY) */
-
-
- /**
- * @brief Instance structure for the Q7 FIR filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of filter coefficients in the filter. */
- q7_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- } arm_fir_instance_q7;
-
- /**
- * @brief Instance structure for the Q15 FIR filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of filter coefficients in the filter. */
- q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- } arm_fir_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 FIR filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of filter coefficients in the filter. */
- q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- } arm_fir_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point FIR filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of filter coefficients in the filter. */
- float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- } arm_fir_instance_f32;
-
-
- /**
- * @brief Processing function for the Q7 FIR filter.
- * @param[in] *S points to an instance of the Q7 FIR filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_q7(
- const arm_fir_instance_q7 * S,
- q7_t * pSrc,
- q7_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the Q7 FIR filter.
- * @param[in,out] *S points to an instance of the Q7 FIR structure.
- * @param[in] numTaps Number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of samples that are processed.
- * @return none
- */
- void arm_fir_init_q7(
- arm_fir_instance_q7 * S,
- uint16_t numTaps,
- q7_t * pCoeffs,
- q7_t * pState,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the Q15 FIR filter.
- * @param[in] *S points to an instance of the Q15 FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_q15(
- const arm_fir_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the fast Q15 FIR filter for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q15 FIR filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_fast_q15(
- const arm_fir_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q15 FIR filter.
- * @param[in,out] *S points to an instance of the Q15 FIR filter structure.
- * @param[in] numTaps Number of filter coefficients in the filter. Must be even and greater than or equal to 4.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of samples that are processed at a time.
- * @return The function returns ARM_MATH_SUCCESS if initialization was successful or ARM_MATH_ARGUMENT_ERROR if
- * numTaps is not a supported value.
- */
-
- arm_status arm_fir_init_q15(
- arm_fir_instance_q15 * S,
- uint16_t numTaps,
- q15_t * pCoeffs,
- q15_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q31 FIR filter.
- * @param[in] *S points to an instance of the Q31 FIR filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_q31(
- const arm_fir_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the fast Q31 FIR filter for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q31 FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_fast_q31(
- const arm_fir_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q31 FIR filter.
- * @param[in,out] *S points to an instance of the Q31 FIR structure.
- * @param[in] numTaps Number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of samples that are processed at a time.
- * @return none.
- */
- void arm_fir_init_q31(
- arm_fir_instance_q31 * S,
- uint16_t numTaps,
- q31_t * pCoeffs,
- q31_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the floating-point FIR filter.
- * @param[in] *S points to an instance of the floating-point FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_f32(
- const arm_fir_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the floating-point FIR filter.
- * @param[in,out] *S points to an instance of the floating-point FIR filter structure.
- * @param[in] numTaps Number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of samples that are processed at a time.
- * @return none.
- */
- void arm_fir_init_f32(
- arm_fir_instance_f32 * S,
- uint16_t numTaps,
- float32_t * pCoeffs,
- float32_t * pState,
- uint32_t blockSize);
-
-
- /**
- * @brief Instance structure for the Q15 Biquad cascade filter.
- */
- typedef struct
- {
- int8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- q15_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */
- q15_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */
- int8_t postShift; /**< Additional shift, in bits, applied to each output sample. */
-
- } arm_biquad_casd_df1_inst_q15;
-
-
- /**
- * @brief Instance structure for the Q31 Biquad cascade filter.
- */
- typedef struct
- {
- uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- q31_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */
- q31_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */
- uint8_t postShift; /**< Additional shift, in bits, applied to each output sample. */
-
- } arm_biquad_casd_df1_inst_q31;
-
- /**
- * @brief Instance structure for the floating-point Biquad cascade filter.
- */
- typedef struct
- {
- uint32_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- float32_t *pState; /**< Points to the array of state coefficients. The array is of length 4*numStages. */
- float32_t *pCoeffs; /**< Points to the array of coefficients. The array is of length 5*numStages. */
-
-
- } arm_biquad_casd_df1_inst_f32;
-
-
-
- /**
- * @brief Processing function for the Q15 Biquad cascade filter.
- * @param[in] *S points to an instance of the Q15 Biquad cascade structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df1_q15(
- const arm_biquad_casd_df1_inst_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q15 Biquad cascade filter.
- * @param[in,out] *S points to an instance of the Q15 Biquad cascade structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format
- * @return none
- */
-
- void arm_biquad_cascade_df1_init_q15(
- arm_biquad_casd_df1_inst_q15 * S,
- uint8_t numStages,
- q15_t * pCoeffs,
- q15_t * pState,
- int8_t postShift);
-
-
- /**
- * @brief Fast but less precise processing function for the Q15 Biquad cascade filter for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q15 Biquad cascade structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df1_fast_q15(
- const arm_biquad_casd_df1_inst_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the Q31 Biquad cascade filter
- * @param[in] *S points to an instance of the Q31 Biquad cascade structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df1_q31(
- const arm_biquad_casd_df1_inst_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Fast but less precise processing function for the Q31 Biquad cascade filter for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q31 Biquad cascade structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df1_fast_q31(
- const arm_biquad_casd_df1_inst_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q31 Biquad cascade filter.
- * @param[in,out] *S points to an instance of the Q31 Biquad cascade structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] postShift Shift to be applied to the output. Varies according to the coefficients format
- * @return none
- */
-
- void arm_biquad_cascade_df1_init_q31(
- arm_biquad_casd_df1_inst_q31 * S,
- uint8_t numStages,
- q31_t * pCoeffs,
- q31_t * pState,
- int8_t postShift);
-
- /**
- * @brief Processing function for the floating-point Biquad cascade filter.
- * @param[in] *S points to an instance of the floating-point Biquad cascade structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df1_f32(
- const arm_biquad_casd_df1_inst_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the floating-point Biquad cascade filter.
- * @param[in,out] *S points to an instance of the floating-point Biquad cascade structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @return none
- */
-
- void arm_biquad_cascade_df1_init_f32(
- arm_biquad_casd_df1_inst_f32 * S,
- uint8_t numStages,
- float32_t * pCoeffs,
- float32_t * pState);
-
-
- /**
- * @brief Instance structure for the floating-point matrix structure.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows of the matrix. */
- uint16_t numCols; /**< number of columns of the matrix. */
- float32_t *pData; /**< points to the data of the matrix. */
- } arm_matrix_instance_f32;
-
-
- /**
- * @brief Instance structure for the floating-point matrix structure.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows of the matrix. */
- uint16_t numCols; /**< number of columns of the matrix. */
- float64_t *pData; /**< points to the data of the matrix. */
- } arm_matrix_instance_f64;
-
- /**
- * @brief Instance structure for the Q15 matrix structure.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows of the matrix. */
- uint16_t numCols; /**< number of columns of the matrix. */
- q15_t *pData; /**< points to the data of the matrix. */
-
- } arm_matrix_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 matrix structure.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows of the matrix. */
- uint16_t numCols; /**< number of columns of the matrix. */
- q31_t *pData; /**< points to the data of the matrix. */
-
- } arm_matrix_instance_q31;
-
-
-
- /**
- * @brief Floating-point matrix addition.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_add_f32(
- const arm_matrix_instance_f32 * pSrcA,
- const arm_matrix_instance_f32 * pSrcB,
- arm_matrix_instance_f32 * pDst);
-
- /**
- * @brief Q15 matrix addition.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_add_q15(
- const arm_matrix_instance_q15 * pSrcA,
- const arm_matrix_instance_q15 * pSrcB,
- arm_matrix_instance_q15 * pDst);
-
- /**
- * @brief Q31 matrix addition.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_add_q31(
- const arm_matrix_instance_q31 * pSrcA,
- const arm_matrix_instance_q31 * pSrcB,
- arm_matrix_instance_q31 * pDst);
-
- /**
- * @brief Floating-point, complex, matrix multiplication.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_cmplx_mult_f32(
- const arm_matrix_instance_f32 * pSrcA,
- const arm_matrix_instance_f32 * pSrcB,
- arm_matrix_instance_f32 * pDst);
-
- /**
- * @brief Q15, complex, matrix multiplication.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_cmplx_mult_q15(
- const arm_matrix_instance_q15 * pSrcA,
- const arm_matrix_instance_q15 * pSrcB,
- arm_matrix_instance_q15 * pDst,
- q15_t * pScratch);
-
- /**
- * @brief Q31, complex, matrix multiplication.
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_cmplx_mult_q31(
- const arm_matrix_instance_q31 * pSrcA,
- const arm_matrix_instance_q31 * pSrcB,
- arm_matrix_instance_q31 * pDst);
-
-
- /**
- * @brief Floating-point matrix transpose.
- * @param[in] *pSrc points to the input matrix
- * @param[out] *pDst points to the output matrix
- * @return The function returns either ARM_MATH_SIZE_MISMATCH
- * or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_trans_f32(
- const arm_matrix_instance_f32 * pSrc,
- arm_matrix_instance_f32 * pDst);
-
-
- /**
- * @brief Q15 matrix transpose.
- * @param[in] *pSrc points to the input matrix
- * @param[out] *pDst points to the output matrix
- * @return The function returns either ARM_MATH_SIZE_MISMATCH
- * or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_trans_q15(
- const arm_matrix_instance_q15 * pSrc,
- arm_matrix_instance_q15 * pDst);
-
- /**
- * @brief Q31 matrix transpose.
- * @param[in] *pSrc points to the input matrix
- * @param[out] *pDst points to the output matrix
- * @return The function returns either ARM_MATH_SIZE_MISMATCH
- * or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_trans_q31(
- const arm_matrix_instance_q31 * pSrc,
- arm_matrix_instance_q31 * pDst);
-
-
- /**
- * @brief Floating-point matrix multiplication
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_mult_f32(
- const arm_matrix_instance_f32 * pSrcA,
- const arm_matrix_instance_f32 * pSrcB,
- arm_matrix_instance_f32 * pDst);
-
- /**
- * @brief Q15 matrix multiplication
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @param[in] *pState points to the array for storing intermediate results
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_mult_q15(
- const arm_matrix_instance_q15 * pSrcA,
- const arm_matrix_instance_q15 * pSrcB,
- arm_matrix_instance_q15 * pDst,
- q15_t * pState);
-
- /**
- * @brief Q15 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @param[in] *pState points to the array for storing intermediate results
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_mult_fast_q15(
- const arm_matrix_instance_q15 * pSrcA,
- const arm_matrix_instance_q15 * pSrcB,
- arm_matrix_instance_q15 * pDst,
- q15_t * pState);
-
- /**
- * @brief Q31 matrix multiplication
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_mult_q31(
- const arm_matrix_instance_q31 * pSrcA,
- const arm_matrix_instance_q31 * pSrcB,
- arm_matrix_instance_q31 * pDst);
-
- /**
- * @brief Q31 matrix multiplication (fast variant) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_mult_fast_q31(
- const arm_matrix_instance_q31 * pSrcA,
- const arm_matrix_instance_q31 * pSrcB,
- arm_matrix_instance_q31 * pDst);
-
-
- /**
- * @brief Floating-point matrix subtraction
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_sub_f32(
- const arm_matrix_instance_f32 * pSrcA,
- const arm_matrix_instance_f32 * pSrcB,
- arm_matrix_instance_f32 * pDst);
-
- /**
- * @brief Q15 matrix subtraction
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_sub_q15(
- const arm_matrix_instance_q15 * pSrcA,
- const arm_matrix_instance_q15 * pSrcB,
- arm_matrix_instance_q15 * pDst);
-
- /**
- * @brief Q31 matrix subtraction
- * @param[in] *pSrcA points to the first input matrix structure
- * @param[in] *pSrcB points to the second input matrix structure
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_sub_q31(
- const arm_matrix_instance_q31 * pSrcA,
- const arm_matrix_instance_q31 * pSrcB,
- arm_matrix_instance_q31 * pDst);
-
- /**
- * @brief Floating-point matrix scaling.
- * @param[in] *pSrc points to the input matrix
- * @param[in] scale scale factor
- * @param[out] *pDst points to the output matrix
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_scale_f32(
- const arm_matrix_instance_f32 * pSrc,
- float32_t scale,
- arm_matrix_instance_f32 * pDst);
-
- /**
- * @brief Q15 matrix scaling.
- * @param[in] *pSrc points to input matrix
- * @param[in] scaleFract fractional portion of the scale factor
- * @param[in] shift number of bits to shift the result by
- * @param[out] *pDst points to output matrix
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_scale_q15(
- const arm_matrix_instance_q15 * pSrc,
- q15_t scaleFract,
- int32_t shift,
- arm_matrix_instance_q15 * pDst);
-
- /**
- * @brief Q31 matrix scaling.
- * @param[in] *pSrc points to input matrix
- * @param[in] scaleFract fractional portion of the scale factor
- * @param[in] shift number of bits to shift the result by
- * @param[out] *pDst points to output matrix structure
- * @return The function returns either
- * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking.
- */
-
- arm_status arm_mat_scale_q31(
- const arm_matrix_instance_q31 * pSrc,
- q31_t scaleFract,
- int32_t shift,
- arm_matrix_instance_q31 * pDst);
-
-
- /**
- * @brief Q31 matrix initialization.
- * @param[in,out] *S points to an instance of the floating-point matrix structure.
- * @param[in] nRows number of rows in the matrix.
- * @param[in] nColumns number of columns in the matrix.
- * @param[in] *pData points to the matrix data array.
- * @return none
- */
-
- void arm_mat_init_q31(
- arm_matrix_instance_q31 * S,
- uint16_t nRows,
- uint16_t nColumns,
- q31_t * pData);
-
- /**
- * @brief Q15 matrix initialization.
- * @param[in,out] *S points to an instance of the floating-point matrix structure.
- * @param[in] nRows number of rows in the matrix.
- * @param[in] nColumns number of columns in the matrix.
- * @param[in] *pData points to the matrix data array.
- * @return none
- */
-
- void arm_mat_init_q15(
- arm_matrix_instance_q15 * S,
- uint16_t nRows,
- uint16_t nColumns,
- q15_t * pData);
-
- /**
- * @brief Floating-point matrix initialization.
- * @param[in,out] *S points to an instance of the floating-point matrix structure.
- * @param[in] nRows number of rows in the matrix.
- * @param[in] nColumns number of columns in the matrix.
- * @param[in] *pData points to the matrix data array.
- * @return none
- */
-
- void arm_mat_init_f32(
- arm_matrix_instance_f32 * S,
- uint16_t nRows,
- uint16_t nColumns,
- float32_t * pData);
-
-
-
- /**
- * @brief Instance structure for the Q15 PID Control.
- */
- typedef struct
- {
- q15_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
-#ifdef ARM_MATH_CM0_FAMILY
- q15_t A1;
- q15_t A2;
-#else
- q31_t A1; /**< The derived gain A1 = -Kp - 2Kd | Kd.*/
-#endif
- q15_t state[3]; /**< The state array of length 3. */
- q15_t Kp; /**< The proportional gain. */
- q15_t Ki; /**< The integral gain. */
- q15_t Kd; /**< The derivative gain. */
- } arm_pid_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 PID Control.
- */
- typedef struct
- {
- q31_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
- q31_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */
- q31_t A2; /**< The derived gain, A2 = Kd . */
- q31_t state[3]; /**< The state array of length 3. */
- q31_t Kp; /**< The proportional gain. */
- q31_t Ki; /**< The integral gain. */
- q31_t Kd; /**< The derivative gain. */
-
- } arm_pid_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point PID Control.
- */
- typedef struct
- {
- float32_t A0; /**< The derived gain, A0 = Kp + Ki + Kd . */
- float32_t A1; /**< The derived gain, A1 = -Kp - 2Kd. */
- float32_t A2; /**< The derived gain, A2 = Kd . */
- float32_t state[3]; /**< The state array of length 3. */
- float32_t Kp; /**< The proportional gain. */
- float32_t Ki; /**< The integral gain. */
- float32_t Kd; /**< The derivative gain. */
- } arm_pid_instance_f32;
-
-
-
- /**
- * @brief Initialization function for the floating-point PID Control.
- * @param[in,out] *S points to an instance of the PID structure.
- * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state.
- * @return none.
- */
- void arm_pid_init_f32(
- arm_pid_instance_f32 * S,
- int32_t resetStateFlag);
-
- /**
- * @brief Reset function for the floating-point PID Control.
- * @param[in,out] *S is an instance of the floating-point PID Control structure
- * @return none
- */
- void arm_pid_reset_f32(
- arm_pid_instance_f32 * S);
-
-
- /**
- * @brief Initialization function for the Q31 PID Control.
- * @param[in,out] *S points to an instance of the Q15 PID structure.
- * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state.
- * @return none.
- */
- void arm_pid_init_q31(
- arm_pid_instance_q31 * S,
- int32_t resetStateFlag);
-
-
- /**
- * @brief Reset function for the Q31 PID Control.
- * @param[in,out] *S points to an instance of the Q31 PID Control structure
- * @return none
- */
-
- void arm_pid_reset_q31(
- arm_pid_instance_q31 * S);
-
- /**
- * @brief Initialization function for the Q15 PID Control.
- * @param[in,out] *S points to an instance of the Q15 PID structure.
- * @param[in] resetStateFlag flag to reset the state. 0 = no change in state 1 = reset the state.
- * @return none.
- */
- void arm_pid_init_q15(
- arm_pid_instance_q15 * S,
- int32_t resetStateFlag);
-
- /**
- * @brief Reset function for the Q15 PID Control.
- * @param[in,out] *S points to an instance of the q15 PID Control structure
- * @return none
- */
- void arm_pid_reset_q15(
- arm_pid_instance_q15 * S);
-
-
- /**
- * @brief Instance structure for the floating-point Linear Interpolate function.
- */
- typedef struct
- {
- uint32_t nValues; /**< nValues */
- float32_t x1; /**< x1 */
- float32_t xSpacing; /**< xSpacing */
- float32_t *pYData; /**< pointer to the table of Y values */
- } arm_linear_interp_instance_f32;
-
- /**
- * @brief Instance structure for the floating-point bilinear interpolation function.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows in the data table. */
- uint16_t numCols; /**< number of columns in the data table. */
- float32_t *pData; /**< points to the data table. */
- } arm_bilinear_interp_instance_f32;
-
- /**
- * @brief Instance structure for the Q31 bilinear interpolation function.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows in the data table. */
- uint16_t numCols; /**< number of columns in the data table. */
- q31_t *pData; /**< points to the data table. */
- } arm_bilinear_interp_instance_q31;
-
- /**
- * @brief Instance structure for the Q15 bilinear interpolation function.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows in the data table. */
- uint16_t numCols; /**< number of columns in the data table. */
- q15_t *pData; /**< points to the data table. */
- } arm_bilinear_interp_instance_q15;
-
- /**
- * @brief Instance structure for the Q15 bilinear interpolation function.
- */
-
- typedef struct
- {
- uint16_t numRows; /**< number of rows in the data table. */
- uint16_t numCols; /**< number of columns in the data table. */
- q7_t *pData; /**< points to the data table. */
- } arm_bilinear_interp_instance_q7;
-
-
- /**
- * @brief Q7 vector multiplication.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_mult_q7(
- q7_t * pSrcA,
- q7_t * pSrcB,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q15 vector multiplication.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_mult_q15(
- q15_t * pSrcA,
- q15_t * pSrcB,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q31 vector multiplication.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_mult_q31(
- q31_t * pSrcA,
- q31_t * pSrcB,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Floating-point vector multiplication.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_mult_f32(
- float32_t * pSrcA,
- float32_t * pSrcB,
- float32_t * pDst,
- uint32_t blockSize);
-
-
-
-
-
-
- /**
- * @brief Instance structure for the Q15 CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- q15_t *pTwiddle; /**< points to the Sin twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- } arm_cfft_radix2_instance_q15;
-
-/* Deprecated */
- arm_status arm_cfft_radix2_init_q15(
- arm_cfft_radix2_instance_q15 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
-/* Deprecated */
- void arm_cfft_radix2_q15(
- const arm_cfft_radix2_instance_q15 * S,
- q15_t * pSrc);
-
-
-
- /**
- * @brief Instance structure for the Q15 CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- q15_t *pTwiddle; /**< points to the twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- } arm_cfft_radix4_instance_q15;
-
-/* Deprecated */
- arm_status arm_cfft_radix4_init_q15(
- arm_cfft_radix4_instance_q15 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
-/* Deprecated */
- void arm_cfft_radix4_q15(
- const arm_cfft_radix4_instance_q15 * S,
- q15_t * pSrc);
-
- /**
- * @brief Instance structure for the Radix-2 Q31 CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- q31_t *pTwiddle; /**< points to the Twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- } arm_cfft_radix2_instance_q31;
-
-/* Deprecated */
- arm_status arm_cfft_radix2_init_q31(
- arm_cfft_radix2_instance_q31 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
-/* Deprecated */
- void arm_cfft_radix2_q31(
- const arm_cfft_radix2_instance_q31 * S,
- q31_t * pSrc);
-
- /**
- * @brief Instance structure for the Q31 CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- q31_t *pTwiddle; /**< points to the twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- } arm_cfft_radix4_instance_q31;
-
-/* Deprecated */
- void arm_cfft_radix4_q31(
- const arm_cfft_radix4_instance_q31 * S,
- q31_t * pSrc);
-
-/* Deprecated */
- arm_status arm_cfft_radix4_init_q31(
- arm_cfft_radix4_instance_q31 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
- /**
- * @brief Instance structure for the floating-point CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- float32_t *pTwiddle; /**< points to the Twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- float32_t onebyfftLen; /**< value of 1/fftLen. */
- } arm_cfft_radix2_instance_f32;
-
-/* Deprecated */
- arm_status arm_cfft_radix2_init_f32(
- arm_cfft_radix2_instance_f32 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
-/* Deprecated */
- void arm_cfft_radix2_f32(
- const arm_cfft_radix2_instance_f32 * S,
- float32_t * pSrc);
-
- /**
- * @brief Instance structure for the floating-point CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- uint8_t ifftFlag; /**< flag that selects forward (ifftFlag=0) or inverse (ifftFlag=1) transform. */
- uint8_t bitReverseFlag; /**< flag that enables (bitReverseFlag=1) or disables (bitReverseFlag=0) bit reversal of output. */
- float32_t *pTwiddle; /**< points to the Twiddle factor table. */
- uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t twidCoefModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- uint16_t bitRevFactor; /**< bit reversal modifier that supports different size FFTs with the same bit reversal table. */
- float32_t onebyfftLen; /**< value of 1/fftLen. */
- } arm_cfft_radix4_instance_f32;
-
-/* Deprecated */
- arm_status arm_cfft_radix4_init_f32(
- arm_cfft_radix4_instance_f32 * S,
- uint16_t fftLen,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
-/* Deprecated */
- void arm_cfft_radix4_f32(
- const arm_cfft_radix4_instance_f32 * S,
- float32_t * pSrc);
-
- /**
- * @brief Instance structure for the fixed-point CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- const q15_t *pTwiddle; /**< points to the Twiddle factor table. */
- const uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t bitRevLength; /**< bit reversal table length. */
- } arm_cfft_instance_q15;
-
-void arm_cfft_q15(
- const arm_cfft_instance_q15 * S,
- q15_t * p1,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
- /**
- * @brief Instance structure for the fixed-point CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- const q31_t *pTwiddle; /**< points to the Twiddle factor table. */
- const uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t bitRevLength; /**< bit reversal table length. */
- } arm_cfft_instance_q31;
-
-void arm_cfft_q31(
- const arm_cfft_instance_q31 * S,
- q31_t * p1,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
- /**
- * @brief Instance structure for the floating-point CFFT/CIFFT function.
- */
-
- typedef struct
- {
- uint16_t fftLen; /**< length of the FFT. */
- const float32_t *pTwiddle; /**< points to the Twiddle factor table. */
- const uint16_t *pBitRevTable; /**< points to the bit reversal table. */
- uint16_t bitRevLength; /**< bit reversal table length. */
- } arm_cfft_instance_f32;
-
- void arm_cfft_f32(
- const arm_cfft_instance_f32 * S,
- float32_t * p1,
- uint8_t ifftFlag,
- uint8_t bitReverseFlag);
-
- /**
- * @brief Instance structure for the Q15 RFFT/RIFFT function.
- */
-
- typedef struct
- {
- uint32_t fftLenReal; /**< length of the real FFT. */
- uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */
- uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */
- uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- q15_t *pTwiddleAReal; /**< points to the real twiddle factor table. */
- q15_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */
- const arm_cfft_instance_q15 *pCfft; /**< points to the complex FFT instance. */
- } arm_rfft_instance_q15;
-
- arm_status arm_rfft_init_q15(
- arm_rfft_instance_q15 * S,
- uint32_t fftLenReal,
- uint32_t ifftFlagR,
- uint32_t bitReverseFlag);
-
- void arm_rfft_q15(
- const arm_rfft_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst);
-
- /**
- * @brief Instance structure for the Q31 RFFT/RIFFT function.
- */
-
- typedef struct
- {
- uint32_t fftLenReal; /**< length of the real FFT. */
- uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */
- uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */
- uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- q31_t *pTwiddleAReal; /**< points to the real twiddle factor table. */
- q31_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */
- const arm_cfft_instance_q31 *pCfft; /**< points to the complex FFT instance. */
- } arm_rfft_instance_q31;
-
- arm_status arm_rfft_init_q31(
- arm_rfft_instance_q31 * S,
- uint32_t fftLenReal,
- uint32_t ifftFlagR,
- uint32_t bitReverseFlag);
-
- void arm_rfft_q31(
- const arm_rfft_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst);
-
- /**
- * @brief Instance structure for the floating-point RFFT/RIFFT function.
- */
-
- typedef struct
- {
- uint32_t fftLenReal; /**< length of the real FFT. */
- uint16_t fftLenBy2; /**< length of the complex FFT. */
- uint8_t ifftFlagR; /**< flag that selects forward (ifftFlagR=0) or inverse (ifftFlagR=1) transform. */
- uint8_t bitReverseFlagR; /**< flag that enables (bitReverseFlagR=1) or disables (bitReverseFlagR=0) bit reversal of output. */
- uint32_t twidCoefRModifier; /**< twiddle coefficient modifier that supports different size FFTs with the same twiddle factor table. */
- float32_t *pTwiddleAReal; /**< points to the real twiddle factor table. */
- float32_t *pTwiddleBReal; /**< points to the imag twiddle factor table. */
- arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */
- } arm_rfft_instance_f32;
-
- arm_status arm_rfft_init_f32(
- arm_rfft_instance_f32 * S,
- arm_cfft_radix4_instance_f32 * S_CFFT,
- uint32_t fftLenReal,
- uint32_t ifftFlagR,
- uint32_t bitReverseFlag);
-
- void arm_rfft_f32(
- const arm_rfft_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst);
-
- /**
- * @brief Instance structure for the floating-point RFFT/RIFFT function.
- */
-
-typedef struct
- {
- arm_cfft_instance_f32 Sint; /**< Internal CFFT structure. */
- uint16_t fftLenRFFT; /**< length of the real sequence */
- float32_t * pTwiddleRFFT; /**< Twiddle factors real stage */
- } arm_rfft_fast_instance_f32 ;
-
-arm_status arm_rfft_fast_init_f32 (
- arm_rfft_fast_instance_f32 * S,
- uint16_t fftLen);
-
-void arm_rfft_fast_f32(
- arm_rfft_fast_instance_f32 * S,
- float32_t * p, float32_t * pOut,
- uint8_t ifftFlag);
-
- /**
- * @brief Instance structure for the floating-point DCT4/IDCT4 function.
- */
-
- typedef struct
- {
- uint16_t N; /**< length of the DCT4. */
- uint16_t Nby2; /**< half of the length of the DCT4. */
- float32_t normalize; /**< normalizing factor. */
- float32_t *pTwiddle; /**< points to the twiddle factor table. */
- float32_t *pCosFactor; /**< points to the cosFactor table. */
- arm_rfft_instance_f32 *pRfft; /**< points to the real FFT instance. */
- arm_cfft_radix4_instance_f32 *pCfft; /**< points to the complex FFT instance. */
- } arm_dct4_instance_f32;
-
- /**
- * @brief Initialization function for the floating-point DCT4/IDCT4.
- * @param[in,out] *S points to an instance of floating-point DCT4/IDCT4 structure.
- * @param[in] *S_RFFT points to an instance of floating-point RFFT/RIFFT structure.
- * @param[in] *S_CFFT points to an instance of floating-point CFFT/CIFFT structure.
- * @param[in] N length of the DCT4.
- * @param[in] Nby2 half of the length of the DCT4.
- * @param[in] normalize normalizing factor.
- * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if fftLenReal is not a supported transform length.
- */
-
- arm_status arm_dct4_init_f32(
- arm_dct4_instance_f32 * S,
- arm_rfft_instance_f32 * S_RFFT,
- arm_cfft_radix4_instance_f32 * S_CFFT,
- uint16_t N,
- uint16_t Nby2,
- float32_t normalize);
-
- /**
- * @brief Processing function for the floating-point DCT4/IDCT4.
- * @param[in] *S points to an instance of the floating-point DCT4/IDCT4 structure.
- * @param[in] *pState points to state buffer.
- * @param[in,out] *pInlineBuffer points to the in-place input and output buffer.
- * @return none.
- */
-
- void arm_dct4_f32(
- const arm_dct4_instance_f32 * S,
- float32_t * pState,
- float32_t * pInlineBuffer);
-
- /**
- * @brief Instance structure for the Q31 DCT4/IDCT4 function.
- */
-
- typedef struct
- {
- uint16_t N; /**< length of the DCT4. */
- uint16_t Nby2; /**< half of the length of the DCT4. */
- q31_t normalize; /**< normalizing factor. */
- q31_t *pTwiddle; /**< points to the twiddle factor table. */
- q31_t *pCosFactor; /**< points to the cosFactor table. */
- arm_rfft_instance_q31 *pRfft; /**< points to the real FFT instance. */
- arm_cfft_radix4_instance_q31 *pCfft; /**< points to the complex FFT instance. */
- } arm_dct4_instance_q31;
-
- /**
- * @brief Initialization function for the Q31 DCT4/IDCT4.
- * @param[in,out] *S points to an instance of Q31 DCT4/IDCT4 structure.
- * @param[in] *S_RFFT points to an instance of Q31 RFFT/RIFFT structure
- * @param[in] *S_CFFT points to an instance of Q31 CFFT/CIFFT structure
- * @param[in] N length of the DCT4.
- * @param[in] Nby2 half of the length of the DCT4.
- * @param[in] normalize normalizing factor.
- * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length.
- */
-
- arm_status arm_dct4_init_q31(
- arm_dct4_instance_q31 * S,
- arm_rfft_instance_q31 * S_RFFT,
- arm_cfft_radix4_instance_q31 * S_CFFT,
- uint16_t N,
- uint16_t Nby2,
- q31_t normalize);
-
- /**
- * @brief Processing function for the Q31 DCT4/IDCT4.
- * @param[in] *S points to an instance of the Q31 DCT4 structure.
- * @param[in] *pState points to state buffer.
- * @param[in,out] *pInlineBuffer points to the in-place input and output buffer.
- * @return none.
- */
-
- void arm_dct4_q31(
- const arm_dct4_instance_q31 * S,
- q31_t * pState,
- q31_t * pInlineBuffer);
-
- /**
- * @brief Instance structure for the Q15 DCT4/IDCT4 function.
- */
-
- typedef struct
- {
- uint16_t N; /**< length of the DCT4. */
- uint16_t Nby2; /**< half of the length of the DCT4. */
- q15_t normalize; /**< normalizing factor. */
- q15_t *pTwiddle; /**< points to the twiddle factor table. */
- q15_t *pCosFactor; /**< points to the cosFactor table. */
- arm_rfft_instance_q15 *pRfft; /**< points to the real FFT instance. */
- arm_cfft_radix4_instance_q15 *pCfft; /**< points to the complex FFT instance. */
- } arm_dct4_instance_q15;
-
- /**
- * @brief Initialization function for the Q15 DCT4/IDCT4.
- * @param[in,out] *S points to an instance of Q15 DCT4/IDCT4 structure.
- * @param[in] *S_RFFT points to an instance of Q15 RFFT/RIFFT structure.
- * @param[in] *S_CFFT points to an instance of Q15 CFFT/CIFFT structure.
- * @param[in] N length of the DCT4.
- * @param[in] Nby2 half of the length of the DCT4.
- * @param[in] normalize normalizing factor.
- * @return arm_status function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_ARGUMENT_ERROR if N is not a supported transform length.
- */
-
- arm_status arm_dct4_init_q15(
- arm_dct4_instance_q15 * S,
- arm_rfft_instance_q15 * S_RFFT,
- arm_cfft_radix4_instance_q15 * S_CFFT,
- uint16_t N,
- uint16_t Nby2,
- q15_t normalize);
-
- /**
- * @brief Processing function for the Q15 DCT4/IDCT4.
- * @param[in] *S points to an instance of the Q15 DCT4 structure.
- * @param[in] *pState points to state buffer.
- * @param[in,out] *pInlineBuffer points to the in-place input and output buffer.
- * @return none.
- */
-
- void arm_dct4_q15(
- const arm_dct4_instance_q15 * S,
- q15_t * pState,
- q15_t * pInlineBuffer);
-
- /**
- * @brief Floating-point vector addition.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_add_f32(
- float32_t * pSrcA,
- float32_t * pSrcB,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q7 vector addition.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_add_q7(
- q7_t * pSrcA,
- q7_t * pSrcB,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q15 vector addition.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_add_q15(
- q15_t * pSrcA,
- q15_t * pSrcB,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q31 vector addition.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_add_q31(
- q31_t * pSrcA,
- q31_t * pSrcB,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Floating-point vector subtraction.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_sub_f32(
- float32_t * pSrcA,
- float32_t * pSrcB,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q7 vector subtraction.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_sub_q7(
- q7_t * pSrcA,
- q7_t * pSrcB,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q15 vector subtraction.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_sub_q15(
- q15_t * pSrcA,
- q15_t * pSrcB,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q31 vector subtraction.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_sub_q31(
- q31_t * pSrcA,
- q31_t * pSrcB,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Multiplies a floating-point vector by a scalar.
- * @param[in] *pSrc points to the input vector
- * @param[in] scale scale factor to be applied
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_scale_f32(
- float32_t * pSrc,
- float32_t scale,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Multiplies a Q7 vector by a scalar.
- * @param[in] *pSrc points to the input vector
- * @param[in] scaleFract fractional portion of the scale value
- * @param[in] shift number of bits to shift the result by
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_scale_q7(
- q7_t * pSrc,
- q7_t scaleFract,
- int8_t shift,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Multiplies a Q15 vector by a scalar.
- * @param[in] *pSrc points to the input vector
- * @param[in] scaleFract fractional portion of the scale value
- * @param[in] shift number of bits to shift the result by
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_scale_q15(
- q15_t * pSrc,
- q15_t scaleFract,
- int8_t shift,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Multiplies a Q31 vector by a scalar.
- * @param[in] *pSrc points to the input vector
- * @param[in] scaleFract fractional portion of the scale value
- * @param[in] shift number of bits to shift the result by
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_scale_q31(
- q31_t * pSrc,
- q31_t scaleFract,
- int8_t shift,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q7 vector absolute value.
- * @param[in] *pSrc points to the input buffer
- * @param[out] *pDst points to the output buffer
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_abs_q7(
- q7_t * pSrc,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Floating-point vector absolute value.
- * @param[in] *pSrc points to the input buffer
- * @param[out] *pDst points to the output buffer
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_abs_f32(
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q15 vector absolute value.
- * @param[in] *pSrc points to the input buffer
- * @param[out] *pDst points to the output buffer
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_abs_q15(
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Q31 vector absolute value.
- * @param[in] *pSrc points to the input buffer
- * @param[out] *pDst points to the output buffer
- * @param[in] blockSize number of samples in each vector
- * @return none.
- */
-
- void arm_abs_q31(
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Dot product of floating-point vectors.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[in] blockSize number of samples in each vector
- * @param[out] *result output result returned here
- * @return none.
- */
-
- void arm_dot_prod_f32(
- float32_t * pSrcA,
- float32_t * pSrcB,
- uint32_t blockSize,
- float32_t * result);
-
- /**
- * @brief Dot product of Q7 vectors.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[in] blockSize number of samples in each vector
- * @param[out] *result output result returned here
- * @return none.
- */
-
- void arm_dot_prod_q7(
- q7_t * pSrcA,
- q7_t * pSrcB,
- uint32_t blockSize,
- q31_t * result);
-
- /**
- * @brief Dot product of Q15 vectors.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[in] blockSize number of samples in each vector
- * @param[out] *result output result returned here
- * @return none.
- */
-
- void arm_dot_prod_q15(
- q15_t * pSrcA,
- q15_t * pSrcB,
- uint32_t blockSize,
- q63_t * result);
-
- /**
- * @brief Dot product of Q31 vectors.
- * @param[in] *pSrcA points to the first input vector
- * @param[in] *pSrcB points to the second input vector
- * @param[in] blockSize number of samples in each vector
- * @param[out] *result output result returned here
- * @return none.
- */
-
- void arm_dot_prod_q31(
- q31_t * pSrcA,
- q31_t * pSrcB,
- uint32_t blockSize,
- q63_t * result);
-
- /**
- * @brief Shifts the elements of a Q7 vector a specified number of bits.
- * @param[in] *pSrc points to the input vector
- * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right.
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_shift_q7(
- q7_t * pSrc,
- int8_t shiftBits,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Shifts the elements of a Q15 vector a specified number of bits.
- * @param[in] *pSrc points to the input vector
- * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right.
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_shift_q15(
- q15_t * pSrc,
- int8_t shiftBits,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Shifts the elements of a Q31 vector a specified number of bits.
- * @param[in] *pSrc points to the input vector
- * @param[in] shiftBits number of bits to shift. A positive value shifts left; a negative value shifts right.
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_shift_q31(
- q31_t * pSrc,
- int8_t shiftBits,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Adds a constant offset to a floating-point vector.
- * @param[in] *pSrc points to the input vector
- * @param[in] offset is the offset to be added
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_offset_f32(
- float32_t * pSrc,
- float32_t offset,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Adds a constant offset to a Q7 vector.
- * @param[in] *pSrc points to the input vector
- * @param[in] offset is the offset to be added
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_offset_q7(
- q7_t * pSrc,
- q7_t offset,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Adds a constant offset to a Q15 vector.
- * @param[in] *pSrc points to the input vector
- * @param[in] offset is the offset to be added
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_offset_q15(
- q15_t * pSrc,
- q15_t offset,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Adds a constant offset to a Q31 vector.
- * @param[in] *pSrc points to the input vector
- * @param[in] offset is the offset to be added
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_offset_q31(
- q31_t * pSrc,
- q31_t offset,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Negates the elements of a floating-point vector.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_negate_f32(
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Negates the elements of a Q7 vector.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_negate_q7(
- q7_t * pSrc,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Negates the elements of a Q15 vector.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_negate_q15(
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Negates the elements of a Q31 vector.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] blockSize number of samples in the vector
- * @return none.
- */
-
- void arm_negate_q31(
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
- /**
- * @brief Copies the elements of a floating-point vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_copy_f32(
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Copies the elements of a Q7 vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_copy_q7(
- q7_t * pSrc,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Copies the elements of a Q15 vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_copy_q15(
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Copies the elements of a Q31 vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_copy_q31(
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
- /**
- * @brief Fills a constant value into a floating-point vector.
- * @param[in] value input value to be filled
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_fill_f32(
- float32_t value,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Fills a constant value into a Q7 vector.
- * @param[in] value input value to be filled
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_fill_q7(
- q7_t value,
- q7_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Fills a constant value into a Q15 vector.
- * @param[in] value input value to be filled
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_fill_q15(
- q15_t value,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Fills a constant value into a Q31 vector.
- * @param[in] value input value to be filled
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_fill_q31(
- q31_t value,
- q31_t * pDst,
- uint32_t blockSize);
-
-/**
- * @brief Convolution of floating-point sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the location where the output result is written. Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_f32(
- float32_t * pSrcA,
- uint32_t srcALen,
- float32_t * pSrcB,
- uint32_t srcBLen,
- float32_t * pDst);
-
-
- /**
- * @brief Convolution of Q15 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @param[in] *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
- * @return none.
- */
-
-
- void arm_conv_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
-/**
- * @brief Convolution of Q15 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the location where the output result is written. Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst);
-
- /**
- * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_fast_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst);
-
- /**
- * @brief Convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @param[in] *pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] *pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
- * @return none.
- */
-
- void arm_conv_fast_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
-
- /**
- * @brief Convolution of Q31 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst);
-
- /**
- * @brief Convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_fast_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst);
-
-
- /**
- * @brief Convolution of Q7 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @param[in] *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).
- * @return none.
- */
-
- void arm_conv_opt_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
-
- /**
- * @brief Convolution of Q7 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length srcALen+srcBLen-1.
- * @return none.
- */
-
- void arm_conv_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst);
-
-
- /**
- * @brief Partial convolution of floating-point sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_f32(
- float32_t * pSrcA,
- uint32_t srcALen,
- float32_t * pSrcB,
- uint32_t srcBLen,
- float32_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
- /**
- * @brief Partial convolution of Q15 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @param[in] * pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] * pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
-/**
- * @brief Partial convolution of Q15 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
- /**
- * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_fast_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
-
- /**
- * @brief Partial convolution of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @param[in] * pScratch1 points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] * pScratch2 points to scratch buffer of size min(srcALen, srcBLen).
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_fast_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
- /**
- * @brief Partial convolution of Q31 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
-
- /**
- * @brief Partial convolution of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_fast_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
-
- /**
- * @brief Partial convolution of Q7 sequences
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @param[in] *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_opt_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
-/**
- * @brief Partial convolution of Q7 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data
- * @param[in] firstIndex is the first output sample to start with.
- * @param[in] numPoints is the number of output points to be computed.
- * @return Returns either ARM_MATH_SUCCESS if the function completed correctly or ARM_MATH_ARGUMENT_ERROR if the requested subset is not in the range [0 srcALen+srcBLen-2].
- */
-
- arm_status arm_conv_partial_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst,
- uint32_t firstIndex,
- uint32_t numPoints);
-
-
-
- /**
- * @brief Instance structure for the Q15 FIR decimator.
- */
-
- typedef struct
- {
- uint8_t M; /**< decimation factor. */
- uint16_t numTaps; /**< number of coefficients in the filter. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- } arm_fir_decimate_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 FIR decimator.
- */
-
- typedef struct
- {
- uint8_t M; /**< decimation factor. */
- uint16_t numTaps; /**< number of coefficients in the filter. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
-
- } arm_fir_decimate_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point FIR decimator.
- */
-
- typedef struct
- {
- uint8_t M; /**< decimation factor. */
- uint16_t numTaps; /**< number of coefficients in the filter. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
-
- } arm_fir_decimate_instance_f32;
-
-
-
- /**
- * @brief Processing function for the floating-point FIR decimator.
- * @param[in] *S points to an instance of the floating-point FIR decimator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of input samples to process per call.
- * @return none
- */
-
- void arm_fir_decimate_f32(
- const arm_fir_decimate_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the floating-point FIR decimator.
- * @param[in,out] *S points to an instance of the floating-point FIR decimator structure.
- * @param[in] numTaps number of coefficients in the filter.
- * @param[in] M decimation factor.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * blockSize is not a multiple of M.
- */
-
- arm_status arm_fir_decimate_init_f32(
- arm_fir_decimate_instance_f32 * S,
- uint16_t numTaps,
- uint8_t M,
- float32_t * pCoeffs,
- float32_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q15 FIR decimator.
- * @param[in] *S points to an instance of the Q15 FIR decimator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of input samples to process per call.
- * @return none
- */
-
- void arm_fir_decimate_q15(
- const arm_fir_decimate_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q15 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q15 FIR decimator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of input samples to process per call.
- * @return none
- */
-
- void arm_fir_decimate_fast_q15(
- const arm_fir_decimate_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
-
-
- /**
- * @brief Initialization function for the Q15 FIR decimator.
- * @param[in,out] *S points to an instance of the Q15 FIR decimator structure.
- * @param[in] numTaps number of coefficients in the filter.
- * @param[in] M decimation factor.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * blockSize is not a multiple of M.
- */
-
- arm_status arm_fir_decimate_init_q15(
- arm_fir_decimate_instance_q15 * S,
- uint16_t numTaps,
- uint8_t M,
- q15_t * pCoeffs,
- q15_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q31 FIR decimator.
- * @param[in] *S points to an instance of the Q31 FIR decimator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of input samples to process per call.
- * @return none
- */
-
- void arm_fir_decimate_q31(
- const arm_fir_decimate_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q31 FIR decimator (fast variant) for Cortex-M3 and Cortex-M4.
- * @param[in] *S points to an instance of the Q31 FIR decimator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of input samples to process per call.
- * @return none
- */
-
- void arm_fir_decimate_fast_q31(
- arm_fir_decimate_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the Q31 FIR decimator.
- * @param[in,out] *S points to an instance of the Q31 FIR decimator structure.
- * @param[in] numTaps number of coefficients in the filter.
- * @param[in] M decimation factor.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * blockSize is not a multiple of M.
- */
-
- arm_status arm_fir_decimate_init_q31(
- arm_fir_decimate_instance_q31 * S,
- uint16_t numTaps,
- uint8_t M,
- q31_t * pCoeffs,
- q31_t * pState,
- uint32_t blockSize);
-
-
-
- /**
- * @brief Instance structure for the Q15 FIR interpolator.
- */
-
- typedef struct
- {
- uint8_t L; /**< upsample factor. */
- uint16_t phaseLength; /**< length of each polyphase filter component. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */
- q15_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */
- } arm_fir_interpolate_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 FIR interpolator.
- */
-
- typedef struct
- {
- uint8_t L; /**< upsample factor. */
- uint16_t phaseLength; /**< length of each polyphase filter component. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */
- q31_t *pState; /**< points to the state variable array. The array is of length blockSize+phaseLength-1. */
- } arm_fir_interpolate_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point FIR interpolator.
- */
-
- typedef struct
- {
- uint8_t L; /**< upsample factor. */
- uint16_t phaseLength; /**< length of each polyphase filter component. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length L*phaseLength. */
- float32_t *pState; /**< points to the state variable array. The array is of length phaseLength+numTaps-1. */
- } arm_fir_interpolate_instance_f32;
-
-
- /**
- * @brief Processing function for the Q15 FIR interpolator.
- * @param[in] *S points to an instance of the Q15 FIR interpolator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_interpolate_q15(
- const arm_fir_interpolate_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the Q15 FIR interpolator.
- * @param[in,out] *S points to an instance of the Q15 FIR interpolator structure.
- * @param[in] L upsample factor.
- * @param[in] numTaps number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficient buffer.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * the filter length numTaps is not a multiple of the interpolation factor L.
- */
-
- arm_status arm_fir_interpolate_init_q15(
- arm_fir_interpolate_instance_q15 * S,
- uint8_t L,
- uint16_t numTaps,
- q15_t * pCoeffs,
- q15_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q31 FIR interpolator.
- * @param[in] *S points to an instance of the Q15 FIR interpolator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_interpolate_q31(
- const arm_fir_interpolate_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q31 FIR interpolator.
- * @param[in,out] *S points to an instance of the Q31 FIR interpolator structure.
- * @param[in] L upsample factor.
- * @param[in] numTaps number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficient buffer.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * the filter length numTaps is not a multiple of the interpolation factor L.
- */
-
- arm_status arm_fir_interpolate_init_q31(
- arm_fir_interpolate_instance_q31 * S,
- uint8_t L,
- uint16_t numTaps,
- q31_t * pCoeffs,
- q31_t * pState,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the floating-point FIR interpolator.
- * @param[in] *S points to an instance of the floating-point FIR interpolator structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_interpolate_f32(
- const arm_fir_interpolate_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the floating-point FIR interpolator.
- * @param[in,out] *S points to an instance of the floating-point FIR interpolator structure.
- * @param[in] L upsample factor.
- * @param[in] numTaps number of filter coefficients in the filter.
- * @param[in] *pCoeffs points to the filter coefficient buffer.
- * @param[in] *pState points to the state buffer.
- * @param[in] blockSize number of input samples to process per call.
- * @return The function returns ARM_MATH_SUCCESS if initialization is successful or ARM_MATH_LENGTH_ERROR if
- * the filter length numTaps is not a multiple of the interpolation factor L.
- */
-
- arm_status arm_fir_interpolate_init_f32(
- arm_fir_interpolate_instance_f32 * S,
- uint8_t L,
- uint16_t numTaps,
- float32_t * pCoeffs,
- float32_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Instance structure for the high precision Q31 Biquad cascade filter.
- */
-
- typedef struct
- {
- uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- q63_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */
- q31_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */
- uint8_t postShift; /**< additional shift, in bits, applied to each output sample. */
-
- } arm_biquad_cas_df1_32x64_ins_q31;
-
-
- /**
- * @param[in] *S points to an instance of the high precision Q31 Biquad cascade filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cas_df1_32x64_q31(
- const arm_biquad_cas_df1_32x64_ins_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @param[in,out] *S points to an instance of the high precision Q31 Biquad cascade filter structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] postShift shift to be applied to the output. Varies according to the coefficients format
- * @return none
- */
-
- void arm_biquad_cas_df1_32x64_init_q31(
- arm_biquad_cas_df1_32x64_ins_q31 * S,
- uint8_t numStages,
- q31_t * pCoeffs,
- q63_t * pState,
- uint8_t postShift);
-
-
-
- /**
- * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter.
- */
-
- typedef struct
- {
- uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- float32_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */
- float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */
- } arm_biquad_cascade_df2T_instance_f32;
-
-
-
- /**
- * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter.
- */
-
- typedef struct
- {
- uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- float32_t *pState; /**< points to the array of state coefficients. The array is of length 4*numStages. */
- float32_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */
- } arm_biquad_cascade_stereo_df2T_instance_f32;
-
-
-
- /**
- * @brief Instance structure for the floating-point transposed direct form II Biquad cascade filter.
- */
-
- typedef struct
- {
- uint8_t numStages; /**< number of 2nd order stages in the filter. Overall order is 2*numStages. */
- float64_t *pState; /**< points to the array of state coefficients. The array is of length 2*numStages. */
- float64_t *pCoeffs; /**< points to the array of coefficients. The array is of length 5*numStages. */
- } arm_biquad_cascade_df2T_instance_f64;
-
-
- /**
- * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter.
- * @param[in] *S points to an instance of the filter data structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df2T_f32(
- const arm_biquad_cascade_df2T_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter. 2 channels
- * @param[in] *S points to an instance of the filter data structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_stereo_df2T_f32(
- const arm_biquad_cascade_stereo_df2T_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the floating-point transposed direct form II Biquad cascade filter.
- * @param[in] *S points to an instance of the filter data structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_biquad_cascade_df2T_f64(
- const arm_biquad_cascade_df2T_instance_f64 * S,
- float64_t * pSrc,
- float64_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter.
- * @param[in,out] *S points to an instance of the filter data structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @return none
- */
-
- void arm_biquad_cascade_df2T_init_f32(
- arm_biquad_cascade_df2T_instance_f32 * S,
- uint8_t numStages,
- float32_t * pCoeffs,
- float32_t * pState);
-
-
- /**
- * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter.
- * @param[in,out] *S points to an instance of the filter data structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @return none
- */
-
- void arm_biquad_cascade_stereo_df2T_init_f32(
- arm_biquad_cascade_stereo_df2T_instance_f32 * S,
- uint8_t numStages,
- float32_t * pCoeffs,
- float32_t * pState);
-
-
- /**
- * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter.
- * @param[in,out] *S points to an instance of the filter data structure.
- * @param[in] numStages number of 2nd order stages in the filter.
- * @param[in] *pCoeffs points to the filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @return none
- */
-
- void arm_biquad_cascade_df2T_init_f64(
- arm_biquad_cascade_df2T_instance_f64 * S,
- uint8_t numStages,
- float64_t * pCoeffs,
- float64_t * pState);
-
-
-
- /**
- * @brief Instance structure for the Q15 FIR lattice filter.
- */
-
- typedef struct
- {
- uint16_t numStages; /**< number of filter stages. */
- q15_t *pState; /**< points to the state variable array. The array is of length numStages. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */
- } arm_fir_lattice_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 FIR lattice filter.
- */
-
- typedef struct
- {
- uint16_t numStages; /**< number of filter stages. */
- q31_t *pState; /**< points to the state variable array. The array is of length numStages. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */
- } arm_fir_lattice_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point FIR lattice filter.
- */
-
- typedef struct
- {
- uint16_t numStages; /**< number of filter stages. */
- float32_t *pState; /**< points to the state variable array. The array is of length numStages. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numStages. */
- } arm_fir_lattice_instance_f32;
-
- /**
- * @brief Initialization function for the Q15 FIR lattice filter.
- * @param[in] *S points to an instance of the Q15 FIR lattice structure.
- * @param[in] numStages number of filter stages.
- * @param[in] *pCoeffs points to the coefficient buffer. The array is of length numStages.
- * @param[in] *pState points to the state buffer. The array is of length numStages.
- * @return none.
- */
-
- void arm_fir_lattice_init_q15(
- arm_fir_lattice_instance_q15 * S,
- uint16_t numStages,
- q15_t * pCoeffs,
- q15_t * pState);
-
-
- /**
- * @brief Processing function for the Q15 FIR lattice filter.
- * @param[in] *S points to an instance of the Q15 FIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
- void arm_fir_lattice_q15(
- const arm_fir_lattice_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q31 FIR lattice filter.
- * @param[in] *S points to an instance of the Q31 FIR lattice structure.
- * @param[in] numStages number of filter stages.
- * @param[in] *pCoeffs points to the coefficient buffer. The array is of length numStages.
- * @param[in] *pState points to the state buffer. The array is of length numStages.
- * @return none.
- */
-
- void arm_fir_lattice_init_q31(
- arm_fir_lattice_instance_q31 * S,
- uint16_t numStages,
- q31_t * pCoeffs,
- q31_t * pState);
-
-
- /**
- * @brief Processing function for the Q31 FIR lattice filter.
- * @param[in] *S points to an instance of the Q31 FIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_fir_lattice_q31(
- const arm_fir_lattice_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
-/**
- * @brief Initialization function for the floating-point FIR lattice filter.
- * @param[in] *S points to an instance of the floating-point FIR lattice structure.
- * @param[in] numStages number of filter stages.
- * @param[in] *pCoeffs points to the coefficient buffer. The array is of length numStages.
- * @param[in] *pState points to the state buffer. The array is of length numStages.
- * @return none.
- */
-
- void arm_fir_lattice_init_f32(
- arm_fir_lattice_instance_f32 * S,
- uint16_t numStages,
- float32_t * pCoeffs,
- float32_t * pState);
-
- /**
- * @brief Processing function for the floating-point FIR lattice filter.
- * @param[in] *S points to an instance of the floating-point FIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_fir_lattice_f32(
- const arm_fir_lattice_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Instance structure for the Q15 IIR lattice filter.
- */
- typedef struct
- {
- uint16_t numStages; /**< number of stages in the filter. */
- q15_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */
- q15_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */
- q15_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */
- } arm_iir_lattice_instance_q15;
-
- /**
- * @brief Instance structure for the Q31 IIR lattice filter.
- */
- typedef struct
- {
- uint16_t numStages; /**< number of stages in the filter. */
- q31_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */
- q31_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */
- q31_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */
- } arm_iir_lattice_instance_q31;
-
- /**
- * @brief Instance structure for the floating-point IIR lattice filter.
- */
- typedef struct
- {
- uint16_t numStages; /**< number of stages in the filter. */
- float32_t *pState; /**< points to the state variable array. The array is of length numStages+blockSize. */
- float32_t *pkCoeffs; /**< points to the reflection coefficient array. The array is of length numStages. */
- float32_t *pvCoeffs; /**< points to the ladder coefficient array. The array is of length numStages+1. */
- } arm_iir_lattice_instance_f32;
-
- /**
- * @brief Processing function for the floating-point IIR lattice filter.
- * @param[in] *S points to an instance of the floating-point IIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_iir_lattice_f32(
- const arm_iir_lattice_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the floating-point IIR lattice filter.
- * @param[in] *S points to an instance of the floating-point IIR lattice structure.
- * @param[in] numStages number of stages in the filter.
- * @param[in] *pkCoeffs points to the reflection coefficient buffer. The array is of length numStages.
- * @param[in] *pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1.
- * @param[in] *pState points to the state buffer. The array is of length numStages+blockSize-1.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_iir_lattice_init_f32(
- arm_iir_lattice_instance_f32 * S,
- uint16_t numStages,
- float32_t * pkCoeffs,
- float32_t * pvCoeffs,
- float32_t * pState,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the Q31 IIR lattice filter.
- * @param[in] *S points to an instance of the Q31 IIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_iir_lattice_q31(
- const arm_iir_lattice_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the Q31 IIR lattice filter.
- * @param[in] *S points to an instance of the Q31 IIR lattice structure.
- * @param[in] numStages number of stages in the filter.
- * @param[in] *pkCoeffs points to the reflection coefficient buffer. The array is of length numStages.
- * @param[in] *pvCoeffs points to the ladder coefficient buffer. The array is of length numStages+1.
- * @param[in] *pState points to the state buffer. The array is of length numStages+blockSize.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_iir_lattice_init_q31(
- arm_iir_lattice_instance_q31 * S,
- uint16_t numStages,
- q31_t * pkCoeffs,
- q31_t * pvCoeffs,
- q31_t * pState,
- uint32_t blockSize);
-
-
- /**
- * @brief Processing function for the Q15 IIR lattice filter.
- * @param[in] *S points to an instance of the Q15 IIR lattice structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_iir_lattice_q15(
- const arm_iir_lattice_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
-
-/**
- * @brief Initialization function for the Q15 IIR lattice filter.
- * @param[in] *S points to an instance of the fixed-point Q15 IIR lattice structure.
- * @param[in] numStages number of stages in the filter.
- * @param[in] *pkCoeffs points to reflection coefficient buffer. The array is of length numStages.
- * @param[in] *pvCoeffs points to ladder coefficient buffer. The array is of length numStages+1.
- * @param[in] *pState points to state buffer. The array is of length numStages+blockSize.
- * @param[in] blockSize number of samples to process per call.
- * @return none.
- */
-
- void arm_iir_lattice_init_q15(
- arm_iir_lattice_instance_q15 * S,
- uint16_t numStages,
- q15_t * pkCoeffs,
- q15_t * pvCoeffs,
- q15_t * pState,
- uint32_t blockSize);
-
- /**
- * @brief Instance structure for the floating-point LMS filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- float32_t mu; /**< step size that controls filter coefficient updates. */
- } arm_lms_instance_f32;
-
- /**
- * @brief Processing function for floating-point LMS filter.
- * @param[in] *S points to an instance of the floating-point LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_f32(
- const arm_lms_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pRef,
- float32_t * pOut,
- float32_t * pErr,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for floating-point LMS filter.
- * @param[in] *S points to an instance of the floating-point LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to the coefficient buffer.
- * @param[in] *pState points to state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_init_f32(
- arm_lms_instance_f32 * S,
- uint16_t numTaps,
- float32_t * pCoeffs,
- float32_t * pState,
- float32_t mu,
- uint32_t blockSize);
-
- /**
- * @brief Instance structure for the Q15 LMS filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- q15_t mu; /**< step size that controls filter coefficient updates. */
- uint32_t postShift; /**< bit shift applied to coefficients. */
- } arm_lms_instance_q15;
-
-
- /**
- * @brief Initialization function for the Q15 LMS filter.
- * @param[in] *S points to an instance of the Q15 LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to the coefficient buffer.
- * @param[in] *pState points to the state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @param[in] postShift bit shift applied to coefficients.
- * @return none.
- */
-
- void arm_lms_init_q15(
- arm_lms_instance_q15 * S,
- uint16_t numTaps,
- q15_t * pCoeffs,
- q15_t * pState,
- q15_t mu,
- uint32_t blockSize,
- uint32_t postShift);
-
- /**
- * @brief Processing function for Q15 LMS filter.
- * @param[in] *S points to an instance of the Q15 LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_q15(
- const arm_lms_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pRef,
- q15_t * pOut,
- q15_t * pErr,
- uint32_t blockSize);
-
-
- /**
- * @brief Instance structure for the Q31 LMS filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- q31_t mu; /**< step size that controls filter coefficient updates. */
- uint32_t postShift; /**< bit shift applied to coefficients. */
-
- } arm_lms_instance_q31;
-
- /**
- * @brief Processing function for Q31 LMS filter.
- * @param[in] *S points to an instance of the Q15 LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_q31(
- const arm_lms_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pRef,
- q31_t * pOut,
- q31_t * pErr,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for Q31 LMS filter.
- * @param[in] *S points to an instance of the Q31 LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to coefficient buffer.
- * @param[in] *pState points to state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @param[in] postShift bit shift applied to coefficients.
- * @return none.
- */
-
- void arm_lms_init_q31(
- arm_lms_instance_q31 * S,
- uint16_t numTaps,
- q31_t * pCoeffs,
- q31_t * pState,
- q31_t mu,
- uint32_t blockSize,
- uint32_t postShift);
-
- /**
- * @brief Instance structure for the floating-point normalized LMS filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- float32_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- float32_t mu; /**< step size that control filter coefficient updates. */
- float32_t energy; /**< saves previous frame energy. */
- float32_t x0; /**< saves previous input sample. */
- } arm_lms_norm_instance_f32;
-
- /**
- * @brief Processing function for floating-point normalized LMS filter.
- * @param[in] *S points to an instance of the floating-point normalized LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_norm_f32(
- arm_lms_norm_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pRef,
- float32_t * pOut,
- float32_t * pErr,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for floating-point normalized LMS filter.
- * @param[in] *S points to an instance of the floating-point LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to coefficient buffer.
- * @param[in] *pState points to state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_norm_init_f32(
- arm_lms_norm_instance_f32 * S,
- uint16_t numTaps,
- float32_t * pCoeffs,
- float32_t * pState,
- float32_t mu,
- uint32_t blockSize);
-
-
- /**
- * @brief Instance structure for the Q31 normalized LMS filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- q31_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- q31_t mu; /**< step size that controls filter coefficient updates. */
- uint8_t postShift; /**< bit shift applied to coefficients. */
- q31_t *recipTable; /**< points to the reciprocal initial value table. */
- q31_t energy; /**< saves previous frame energy. */
- q31_t x0; /**< saves previous input sample. */
- } arm_lms_norm_instance_q31;
-
- /**
- * @brief Processing function for Q31 normalized LMS filter.
- * @param[in] *S points to an instance of the Q31 normalized LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_norm_q31(
- arm_lms_norm_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pRef,
- q31_t * pOut,
- q31_t * pErr,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for Q31 normalized LMS filter.
- * @param[in] *S points to an instance of the Q31 normalized LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to coefficient buffer.
- * @param[in] *pState points to state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @param[in] postShift bit shift applied to coefficients.
- * @return none.
- */
-
- void arm_lms_norm_init_q31(
- arm_lms_norm_instance_q31 * S,
- uint16_t numTaps,
- q31_t * pCoeffs,
- q31_t * pState,
- q31_t mu,
- uint32_t blockSize,
- uint8_t postShift);
-
- /**
- * @brief Instance structure for the Q15 normalized LMS filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< Number of coefficients in the filter. */
- q15_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */
- q15_t mu; /**< step size that controls filter coefficient updates. */
- uint8_t postShift; /**< bit shift applied to coefficients. */
- q15_t *recipTable; /**< Points to the reciprocal initial value table. */
- q15_t energy; /**< saves previous frame energy. */
- q15_t x0; /**< saves previous input sample. */
- } arm_lms_norm_instance_q15;
-
- /**
- * @brief Processing function for Q15 normalized LMS filter.
- * @param[in] *S points to an instance of the Q15 normalized LMS filter structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[in] *pRef points to the block of reference data.
- * @param[out] *pOut points to the block of output data.
- * @param[out] *pErr points to the block of error data.
- * @param[in] blockSize number of samples to process.
- * @return none.
- */
-
- void arm_lms_norm_q15(
- arm_lms_norm_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pRef,
- q15_t * pOut,
- q15_t * pErr,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for Q15 normalized LMS filter.
- * @param[in] *S points to an instance of the Q15 normalized LMS filter structure.
- * @param[in] numTaps number of filter coefficients.
- * @param[in] *pCoeffs points to coefficient buffer.
- * @param[in] *pState points to state buffer.
- * @param[in] mu step size that controls filter coefficient updates.
- * @param[in] blockSize number of samples to process.
- * @param[in] postShift bit shift applied to coefficients.
- * @return none.
- */
-
- void arm_lms_norm_init_q15(
- arm_lms_norm_instance_q15 * S,
- uint16_t numTaps,
- q15_t * pCoeffs,
- q15_t * pState,
- q15_t mu,
- uint32_t blockSize,
- uint8_t postShift);
-
- /**
- * @brief Correlation of floating-point sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_f32(
- float32_t * pSrcA,
- uint32_t srcALen,
- float32_t * pSrcB,
- uint32_t srcBLen,
- float32_t * pDst);
-
-
- /**
- * @brief Correlation of Q15 sequences
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @param[in] *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @return none.
- */
- void arm_correlate_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- q15_t * pScratch);
-
-
- /**
- * @brief Correlation of Q15 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst);
-
- /**
- * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_fast_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst);
-
-
-
- /**
- * @brief Correlation of Q15 sequences (fast version) for Cortex-M3 and Cortex-M4.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @param[in] *pScratch points to scratch buffer of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @return none.
- */
-
- void arm_correlate_fast_opt_q15(
- q15_t * pSrcA,
- uint32_t srcALen,
- q15_t * pSrcB,
- uint32_t srcBLen,
- q15_t * pDst,
- q15_t * pScratch);
-
- /**
- * @brief Correlation of Q31 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst);
-
- /**
- * @brief Correlation of Q31 sequences (fast version) for Cortex-M3 and Cortex-M4
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_fast_q31(
- q31_t * pSrcA,
- uint32_t srcALen,
- q31_t * pSrcB,
- uint32_t srcBLen,
- q31_t * pDst);
-
-
-
- /**
- * @brief Correlation of Q7 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @param[in] *pScratch1 points to scratch buffer(of type q15_t) of size max(srcALen, srcBLen) + 2*min(srcALen, srcBLen) - 2.
- * @param[in] *pScratch2 points to scratch buffer (of type q15_t) of size min(srcALen, srcBLen).
- * @return none.
- */
-
- void arm_correlate_opt_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst,
- q15_t * pScratch1,
- q15_t * pScratch2);
-
-
- /**
- * @brief Correlation of Q7 sequences.
- * @param[in] *pSrcA points to the first input sequence.
- * @param[in] srcALen length of the first input sequence.
- * @param[in] *pSrcB points to the second input sequence.
- * @param[in] srcBLen length of the second input sequence.
- * @param[out] *pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1.
- * @return none.
- */
-
- void arm_correlate_q7(
- q7_t * pSrcA,
- uint32_t srcALen,
- q7_t * pSrcB,
- uint32_t srcBLen,
- q7_t * pDst);
-
-
- /**
- * @brief Instance structure for the floating-point sparse FIR filter.
- */
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */
- float32_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */
- float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */
- int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */
- } arm_fir_sparse_instance_f32;
-
- /**
- * @brief Instance structure for the Q31 sparse FIR filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */
- q31_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */
- q31_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */
- int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */
- } arm_fir_sparse_instance_q31;
-
- /**
- * @brief Instance structure for the Q15 sparse FIR filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */
- q15_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */
- q15_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */
- int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */
- } arm_fir_sparse_instance_q15;
-
- /**
- * @brief Instance structure for the Q7 sparse FIR filter.
- */
-
- typedef struct
- {
- uint16_t numTaps; /**< number of coefficients in the filter. */
- uint16_t stateIndex; /**< state buffer index. Points to the oldest sample in the state buffer. */
- q7_t *pState; /**< points to the state buffer array. The array is of length maxDelay+blockSize-1. */
- q7_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps.*/
- uint16_t maxDelay; /**< maximum offset specified by the pTapDelay array. */
- int32_t *pTapDelay; /**< points to the array of delay values. The array is of length numTaps. */
- } arm_fir_sparse_instance_q7;
-
- /**
- * @brief Processing function for the floating-point sparse FIR filter.
- * @param[in] *S points to an instance of the floating-point sparse FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] *pScratchIn points to a temporary buffer of size blockSize.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_sparse_f32(
- arm_fir_sparse_instance_f32 * S,
- float32_t * pSrc,
- float32_t * pDst,
- float32_t * pScratchIn,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the floating-point sparse FIR filter.
- * @param[in,out] *S points to an instance of the floating-point sparse FIR structure.
- * @param[in] numTaps number of nonzero coefficients in the filter.
- * @param[in] *pCoeffs points to the array of filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] *pTapDelay points to the array of offset times.
- * @param[in] maxDelay maximum offset time supported.
- * @param[in] blockSize number of samples that will be processed per block.
- * @return none
- */
-
- void arm_fir_sparse_init_f32(
- arm_fir_sparse_instance_f32 * S,
- uint16_t numTaps,
- float32_t * pCoeffs,
- float32_t * pState,
- int32_t * pTapDelay,
- uint16_t maxDelay,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q31 sparse FIR filter.
- * @param[in] *S points to an instance of the Q31 sparse FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] *pScratchIn points to a temporary buffer of size blockSize.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_sparse_q31(
- arm_fir_sparse_instance_q31 * S,
- q31_t * pSrc,
- q31_t * pDst,
- q31_t * pScratchIn,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q31 sparse FIR filter.
- * @param[in,out] *S points to an instance of the Q31 sparse FIR structure.
- * @param[in] numTaps number of nonzero coefficients in the filter.
- * @param[in] *pCoeffs points to the array of filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] *pTapDelay points to the array of offset times.
- * @param[in] maxDelay maximum offset time supported.
- * @param[in] blockSize number of samples that will be processed per block.
- * @return none
- */
-
- void arm_fir_sparse_init_q31(
- arm_fir_sparse_instance_q31 * S,
- uint16_t numTaps,
- q31_t * pCoeffs,
- q31_t * pState,
- int32_t * pTapDelay,
- uint16_t maxDelay,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q15 sparse FIR filter.
- * @param[in] *S points to an instance of the Q15 sparse FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] *pScratchIn points to a temporary buffer of size blockSize.
- * @param[in] *pScratchOut points to a temporary buffer of size blockSize.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_sparse_q15(
- arm_fir_sparse_instance_q15 * S,
- q15_t * pSrc,
- q15_t * pDst,
- q15_t * pScratchIn,
- q31_t * pScratchOut,
- uint32_t blockSize);
-
-
- /**
- * @brief Initialization function for the Q15 sparse FIR filter.
- * @param[in,out] *S points to an instance of the Q15 sparse FIR structure.
- * @param[in] numTaps number of nonzero coefficients in the filter.
- * @param[in] *pCoeffs points to the array of filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] *pTapDelay points to the array of offset times.
- * @param[in] maxDelay maximum offset time supported.
- * @param[in] blockSize number of samples that will be processed per block.
- * @return none
- */
-
- void arm_fir_sparse_init_q15(
- arm_fir_sparse_instance_q15 * S,
- uint16_t numTaps,
- q15_t * pCoeffs,
- q15_t * pState,
- int32_t * pTapDelay,
- uint16_t maxDelay,
- uint32_t blockSize);
-
- /**
- * @brief Processing function for the Q7 sparse FIR filter.
- * @param[in] *S points to an instance of the Q7 sparse FIR structure.
- * @param[in] *pSrc points to the block of input data.
- * @param[out] *pDst points to the block of output data
- * @param[in] *pScratchIn points to a temporary buffer of size blockSize.
- * @param[in] *pScratchOut points to a temporary buffer of size blockSize.
- * @param[in] blockSize number of input samples to process per call.
- * @return none.
- */
-
- void arm_fir_sparse_q7(
- arm_fir_sparse_instance_q7 * S,
- q7_t * pSrc,
- q7_t * pDst,
- q7_t * pScratchIn,
- q31_t * pScratchOut,
- uint32_t blockSize);
-
- /**
- * @brief Initialization function for the Q7 sparse FIR filter.
- * @param[in,out] *S points to an instance of the Q7 sparse FIR structure.
- * @param[in] numTaps number of nonzero coefficients in the filter.
- * @param[in] *pCoeffs points to the array of filter coefficients.
- * @param[in] *pState points to the state buffer.
- * @param[in] *pTapDelay points to the array of offset times.
- * @param[in] maxDelay maximum offset time supported.
- * @param[in] blockSize number of samples that will be processed per block.
- * @return none
- */
-
- void arm_fir_sparse_init_q7(
- arm_fir_sparse_instance_q7 * S,
- uint16_t numTaps,
- q7_t * pCoeffs,
- q7_t * pState,
- int32_t * pTapDelay,
- uint16_t maxDelay,
- uint32_t blockSize);
-
-
- /*
- * @brief Floating-point sin_cos function.
- * @param[in] theta input value in degrees
- * @param[out] *pSinVal points to the processed sine output.
- * @param[out] *pCosVal points to the processed cos output.
- * @return none.
- */
-
- void arm_sin_cos_f32(
- float32_t theta,
- float32_t * pSinVal,
- float32_t * pCcosVal);
-
- /*
- * @brief Q31 sin_cos function.
- * @param[in] theta scaled input value in degrees
- * @param[out] *pSinVal points to the processed sine output.
- * @param[out] *pCosVal points to the processed cosine output.
- * @return none.
- */
-
- void arm_sin_cos_q31(
- q31_t theta,
- q31_t * pSinVal,
- q31_t * pCosVal);
-
-
- /**
- * @brief Floating-point complex conjugate.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] numSamples number of complex samples in each vector
- * @return none.
- */
-
- void arm_cmplx_conj_f32(
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t numSamples);
-
- /**
- * @brief Q31 complex conjugate.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] numSamples number of complex samples in each vector
- * @return none.
- */
-
- void arm_cmplx_conj_q31(
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t numSamples);
-
- /**
- * @brief Q15 complex conjugate.
- * @param[in] *pSrc points to the input vector
- * @param[out] *pDst points to the output vector
- * @param[in] numSamples number of complex samples in each vector
- * @return none.
- */
-
- void arm_cmplx_conj_q15(
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t numSamples);
-
-
-
- /**
- * @brief Floating-point complex magnitude squared
- * @param[in] *pSrc points to the complex input vector
- * @param[out] *pDst points to the real output vector
- * @param[in] numSamples number of complex samples in the input vector
- * @return none.
- */
-
- void arm_cmplx_mag_squared_f32(
- float32_t * pSrc,
- float32_t * pDst,
- uint32_t numSamples);
-
- /**
- * @brief Q31 complex magnitude squared
- * @param[in] *pSrc points to the complex input vector
- * @param[out] *pDst points to the real output vector
- * @param[in] numSamples number of complex samples in the input vector
- * @return none.
- */
-
- void arm_cmplx_mag_squared_q31(
- q31_t * pSrc,
- q31_t * pDst,
- uint32_t numSamples);
-
- /**
- * @brief Q15 complex magnitude squared
- * @param[in] *pSrc points to the complex input vector
- * @param[out] *pDst points to the real output vector
- * @param[in] numSamples number of complex samples in the input vector
- * @return none.
- */
-
- void arm_cmplx_mag_squared_q15(
- q15_t * pSrc,
- q15_t * pDst,
- uint32_t numSamples);
-
-
- /**
- * @ingroup groupController
- */
-
- /**
- * @defgroup PID PID Motor Control
- *
- * A Proportional Integral Derivative (PID) controller is a generic feedback control
- * loop mechanism widely used in industrial control systems.
- * A PID controller is the most commonly used type of feedback controller.
- *
- * This set of functions implements (PID) controllers
- * for Q15, Q31, and floating-point data types. The functions operate on a single sample
- * of data and each call to the function returns a single processed value.
- * S points to an instance of the PID control data structure. in
- * is the input sample value. The functions return the output value.
- *
- * \par Algorithm:
- *
- *
- * \par
- * where \c Kp is proportional constant, \c Ki is Integral constant and \c Kd is Derivative constant
- *
- * \par
- * \image html PID.gif "Proportional Integral Derivative Controller"
- *
- * \par
- * The PID controller calculates an "error" value as the difference between
- * the measured output and the reference input.
- * The controller attempts to minimize the error by adjusting the process control inputs.
- * The proportional value determines the reaction to the current error,
- * the integral value determines the reaction based on the sum of recent errors,
- * and the derivative value determines the reaction based on the rate at which the error has been changing.
- *
- * \par Instance Structure
- * The Gains A0, A1, A2 and state variables for a PID controller are stored together in an instance data structure.
- * A separate instance structure must be defined for each PID Controller.
- * There are separate instance structure declarations for each of the 3 supported data types.
- *
- * \par Reset Functions
- * There is also an associated reset function for each data type which clears the state array.
- *
- * \par Initialization Functions
- * There is also an associated initialization function for each data type.
- * The initialization function performs the following operations:
- * - Initializes the Gains A0, A1, A2 from Kp,Ki, Kd gains.
- * - Zeros out the values in the state buffer.
- *
- * \par
- * Instance structure cannot be placed into a const data section and it is recommended to use the initialization function.
- *
- * \par Fixed-Point Behavior
- * Care must be taken when using the fixed-point versions of the PID Controller functions.
- * In particular, the overflow and saturation behavior of the accumulator used in each function must be considered.
- * Refer to the function specific documentation below for usage guidelines.
- */
-
- /**
- * @addtogroup PID
- * @{
- */
-
- /**
- * @brief Process function for the floating-point PID Control.
- * @param[in,out] *S is an instance of the floating-point PID Control structure
- * @param[in] in input sample to process
- * @return out processed output sample.
- */
-
-
- static __INLINE float32_t arm_pid_f32(
- arm_pid_instance_f32 * S,
- float32_t in)
- {
- float32_t out;
-
- /* y[n] = y[n-1] + A0 * x[n] + A1 * x[n-1] + A2 * x[n-2] */
- out = (S->A0 * in) +
- (S->A1 * S->state[0]) + (S->A2 * S->state[1]) + (S->state[2]);
-
- /* Update state */
- S->state[1] = S->state[0];
- S->state[0] = in;
- S->state[2] = out;
-
- /* return to application */
- return (out);
-
- }
-
- /**
- * @brief Process function for the Q31 PID Control.
- * @param[in,out] *S points to an instance of the Q31 PID Control structure
- * @param[in] in input sample to process
- * @return out processed output sample.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using an internal 64-bit accumulator.
- * The accumulator has a 2.62 format and maintains full precision of the intermediate multiplication results but provides only a single guard bit.
- * Thus, if the accumulator result overflows it wraps around rather than clip.
- * In order to avoid overflows completely the input signal must be scaled down by 2 bits as there are four additions.
- * After all multiply-accumulates are performed, the 2.62 accumulator is truncated to 1.32 format and then saturated to 1.31 format.
- */
-
- static __INLINE q31_t arm_pid_q31(
- arm_pid_instance_q31 * S,
- q31_t in)
- {
- q63_t acc;
- q31_t out;
-
- /* acc = A0 * x[n] */
- acc = (q63_t) S->A0 * in;
-
- /* acc += A1 * x[n-1] */
- acc += (q63_t) S->A1 * S->state[0];
-
- /* acc += A2 * x[n-2] */
- acc += (q63_t) S->A2 * S->state[1];
-
- /* convert output to 1.31 format to add y[n-1] */
- out = (q31_t) (acc >> 31u);
-
- /* out += y[n-1] */
- out += S->state[2];
-
- /* Update state */
- S->state[1] = S->state[0];
- S->state[0] = in;
- S->state[2] = out;
-
- /* return to application */
- return (out);
-
- }
-
- /**
- * @brief Process function for the Q15 PID Control.
- * @param[in,out] *S points to an instance of the Q15 PID Control structure
- * @param[in] in input sample to process
- * @return out processed output sample.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using a 64-bit internal accumulator.
- * Both Gains and state variables are represented in 1.15 format and multiplications yield a 2.30 result.
- * The 2.30 intermediate results are accumulated in a 64-bit accumulator in 34.30 format.
- * There is no risk of internal overflow with this approach and the full precision of intermediate multiplications is preserved.
- * After all additions have been performed, the accumulator is truncated to 34.15 format by discarding low 15 bits.
- * Lastly, the accumulator is saturated to yield a result in 1.15 format.
- */
-
- static __INLINE q15_t arm_pid_q15(
- arm_pid_instance_q15 * S,
- q15_t in)
- {
- q63_t acc;
- q15_t out;
-
-#ifndef ARM_MATH_CM0_FAMILY
- __SIMD32_TYPE *vstate;
-
- /* Implementation of PID controller */
-
- /* acc = A0 * x[n] */
- acc = (q31_t) __SMUAD(S->A0, in);
-
- /* acc += A1 * x[n-1] + A2 * x[n-2] */
- vstate = __SIMD32_CONST(S->state);
- acc = __SMLALD(S->A1, (q31_t) *vstate, acc);
-
-#else
- /* acc = A0 * x[n] */
- acc = ((q31_t) S->A0) * in;
-
- /* acc += A1 * x[n-1] + A2 * x[n-2] */
- acc += (q31_t) S->A1 * S->state[0];
- acc += (q31_t) S->A2 * S->state[1];
-
-#endif
-
- /* acc += y[n-1] */
- acc += (q31_t) S->state[2] << 15;
-
- /* saturate the output */
- out = (q15_t) (__SSAT((acc >> 15), 16));
-
- /* Update state */
- S->state[1] = S->state[0];
- S->state[0] = in;
- S->state[2] = out;
-
- /* return to application */
- return (out);
-
- }
-
- /**
- * @} end of PID group
- */
-
-
- /**
- * @brief Floating-point matrix inverse.
- * @param[in] *src points to the instance of the input floating-point matrix structure.
- * @param[out] *dst points to the instance of the output floating-point matrix structure.
- * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match.
- * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR.
- */
-
- arm_status arm_mat_inverse_f32(
- const arm_matrix_instance_f32 * src,
- arm_matrix_instance_f32 * dst);
-
-
- /**
- * @brief Floating-point matrix inverse.
- * @param[in] *src points to the instance of the input floating-point matrix structure.
- * @param[out] *dst points to the instance of the output floating-point matrix structure.
- * @return The function returns ARM_MATH_SIZE_MISMATCH, if the dimensions do not match.
- * If the input matrix is singular (does not have an inverse), then the algorithm terminates and returns error status ARM_MATH_SINGULAR.
- */
-
- arm_status arm_mat_inverse_f64(
- const arm_matrix_instance_f64 * src,
- arm_matrix_instance_f64 * dst);
-
-
-
- /**
- * @ingroup groupController
- */
-
-
- /**
- * @defgroup clarke Vector Clarke Transform
- * Forward Clarke transform converts the instantaneous stator phases into a two-coordinate time invariant vector.
- * Generally the Clarke transform uses three-phase currents Ia, Ib and Ic to calculate currents
- * in the two-phase orthogonal stator axis Ialpha and Ibeta.
- * When Ialpha is superposed with Ia as shown in the figure below
- * \image html clarke.gif Stator current space vector and its components in (a,b).
- * and Ia + Ib + Ic = 0, in this condition Ialpha and Ibeta
- * can be calculated using only Ia and Ib.
- *
- * The function operates on a single sample of data and each call to the function returns the processed output.
- * The library provides separate functions for Q31 and floating-point data types.
- * \par Algorithm
- * \image html clarkeFormula.gif
- * where Ia and Ib are the instantaneous stator phases and
- * pIalpha and pIbeta are the two coordinates of time invariant vector.
- * \par Fixed-Point Behavior
- * Care must be taken when using the Q31 version of the Clarke transform.
- * In particular, the overflow and saturation behavior of the accumulator used must be considered.
- * Refer to the function specific documentation below for usage guidelines.
- */
-
- /**
- * @addtogroup clarke
- * @{
- */
-
- /**
- *
- * @brief Floating-point Clarke transform
- * @param[in] Ia input three-phase coordinate a
- * @param[in] Ib input three-phase coordinate b
- * @param[out] *pIalpha points to output two-phase orthogonal vector axis alpha
- * @param[out] *pIbeta points to output two-phase orthogonal vector axis beta
- * @return none.
- */
-
- static __INLINE void arm_clarke_f32(
- float32_t Ia,
- float32_t Ib,
- float32_t * pIalpha,
- float32_t * pIbeta)
- {
- /* Calculate pIalpha using the equation, pIalpha = Ia */
- *pIalpha = Ia;
-
- /* Calculate pIbeta using the equation, pIbeta = (1/sqrt(3)) * Ia + (2/sqrt(3)) * Ib */
- *pIbeta =
- ((float32_t) 0.57735026919 * Ia + (float32_t) 1.15470053838 * Ib);
-
- }
-
- /**
- * @brief Clarke transform for Q31 version
- * @param[in] Ia input three-phase coordinate a
- * @param[in] Ib input three-phase coordinate b
- * @param[out] *pIalpha points to output two-phase orthogonal vector axis alpha
- * @param[out] *pIbeta points to output two-phase orthogonal vector axis beta
- * @return none.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using an internal 32-bit accumulator.
- * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.
- * There is saturation on the addition, hence there is no risk of overflow.
- */
-
- static __INLINE void arm_clarke_q31(
- q31_t Ia,
- q31_t Ib,
- q31_t * pIalpha,
- q31_t * pIbeta)
- {
- q31_t product1, product2; /* Temporary variables used to store intermediate results */
-
- /* Calculating pIalpha from Ia by equation pIalpha = Ia */
- *pIalpha = Ia;
-
- /* Intermediate product is calculated by (1/(sqrt(3)) * Ia) */
- product1 = (q31_t) (((q63_t) Ia * 0x24F34E8B) >> 30);
-
- /* Intermediate product is calculated by (2/sqrt(3) * Ib) */
- product2 = (q31_t) (((q63_t) Ib * 0x49E69D16) >> 30);
-
- /* pIbeta is calculated by adding the intermediate products */
- *pIbeta = __QADD(product1, product2);
- }
-
- /**
- * @} end of clarke group
- */
-
- /**
- * @brief Converts the elements of the Q7 vector to Q31 vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_q7_to_q31(
- q7_t * pSrc,
- q31_t * pDst,
- uint32_t blockSize);
-
-
-
-
- /**
- * @ingroup groupController
- */
-
- /**
- * @defgroup inv_clarke Vector Inverse Clarke Transform
- * Inverse Clarke transform converts the two-coordinate time invariant vector into instantaneous stator phases.
- *
- * The function operates on a single sample of data and each call to the function returns the processed output.
- * The library provides separate functions for Q31 and floating-point data types.
- * \par Algorithm
- * \image html clarkeInvFormula.gif
- * where pIa and pIb are the instantaneous stator phases and
- * Ialpha and Ibeta are the two coordinates of time invariant vector.
- * \par Fixed-Point Behavior
- * Care must be taken when using the Q31 version of the Clarke transform.
- * In particular, the overflow and saturation behavior of the accumulator used must be considered.
- * Refer to the function specific documentation below for usage guidelines.
- */
-
- /**
- * @addtogroup inv_clarke
- * @{
- */
-
- /**
- * @brief Floating-point Inverse Clarke transform
- * @param[in] Ialpha input two-phase orthogonal vector axis alpha
- * @param[in] Ibeta input two-phase orthogonal vector axis beta
- * @param[out] *pIa points to output three-phase coordinate a
- * @param[out] *pIb points to output three-phase coordinate b
- * @return none.
- */
-
-
- static __INLINE void arm_inv_clarke_f32(
- float32_t Ialpha,
- float32_t Ibeta,
- float32_t * pIa,
- float32_t * pIb)
- {
- /* Calculating pIa from Ialpha by equation pIa = Ialpha */
- *pIa = Ialpha;
-
- /* Calculating pIb from Ialpha and Ibeta by equation pIb = -(1/2) * Ialpha + (sqrt(3)/2) * Ibeta */
- *pIb = -0.5 * Ialpha + (float32_t) 0.8660254039 *Ibeta;
-
- }
-
- /**
- * @brief Inverse Clarke transform for Q31 version
- * @param[in] Ialpha input two-phase orthogonal vector axis alpha
- * @param[in] Ibeta input two-phase orthogonal vector axis beta
- * @param[out] *pIa points to output three-phase coordinate a
- * @param[out] *pIb points to output three-phase coordinate b
- * @return none.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using an internal 32-bit accumulator.
- * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.
- * There is saturation on the subtraction, hence there is no risk of overflow.
- */
-
- static __INLINE void arm_inv_clarke_q31(
- q31_t Ialpha,
- q31_t Ibeta,
- q31_t * pIa,
- q31_t * pIb)
- {
- q31_t product1, product2; /* Temporary variables used to store intermediate results */
-
- /* Calculating pIa from Ialpha by equation pIa = Ialpha */
- *pIa = Ialpha;
-
- /* Intermediate product is calculated by (1/(2*sqrt(3)) * Ia) */
- product1 = (q31_t) (((q63_t) (Ialpha) * (0x40000000)) >> 31);
-
- /* Intermediate product is calculated by (1/sqrt(3) * pIb) */
- product2 = (q31_t) (((q63_t) (Ibeta) * (0x6ED9EBA1)) >> 31);
-
- /* pIb is calculated by subtracting the products */
- *pIb = __QSUB(product2, product1);
-
- }
-
- /**
- * @} end of inv_clarke group
- */
-
- /**
- * @brief Converts the elements of the Q7 vector to Q15 vector.
- * @param[in] *pSrc input pointer
- * @param[out] *pDst output pointer
- * @param[in] blockSize number of samples to process
- * @return none.
- */
- void arm_q7_to_q15(
- q7_t * pSrc,
- q15_t * pDst,
- uint32_t blockSize);
-
-
-
- /**
- * @ingroup groupController
- */
-
- /**
- * @defgroup park Vector Park Transform
- *
- * Forward Park transform converts the input two-coordinate vector to flux and torque components.
- * The Park transform can be used to realize the transformation of the Ialpha and the Ibeta currents
- * from the stationary to the moving reference frame and control the spatial relationship between
- * the stator vector current and rotor flux vector.
- * If we consider the d axis aligned with the rotor flux, the diagram below shows the
- * current vector and the relationship from the two reference frames:
- * \image html park.gif "Stator current space vector and its component in (a,b) and in the d,q rotating reference frame"
- *
- * The function operates on a single sample of data and each call to the function returns the processed output.
- * The library provides separate functions for Q31 and floating-point data types.
- * \par Algorithm
- * \image html parkFormula.gif
- * where Ialpha and Ibeta are the stator vector components,
- * pId and pIq are rotor vector components and cosVal and sinVal are the
- * cosine and sine values of theta (rotor flux position).
- * \par Fixed-Point Behavior
- * Care must be taken when using the Q31 version of the Park transform.
- * In particular, the overflow and saturation behavior of the accumulator used must be considered.
- * Refer to the function specific documentation below for usage guidelines.
- */
-
- /**
- * @addtogroup park
- * @{
- */
-
- /**
- * @brief Floating-point Park transform
- * @param[in] Ialpha input two-phase vector coordinate alpha
- * @param[in] Ibeta input two-phase vector coordinate beta
- * @param[out] *pId points to output rotor reference frame d
- * @param[out] *pIq points to output rotor reference frame q
- * @param[in] sinVal sine value of rotation angle theta
- * @param[in] cosVal cosine value of rotation angle theta
- * @return none.
- *
- * The function implements the forward Park transform.
- *
- */
-
- static __INLINE void arm_park_f32(
- float32_t Ialpha,
- float32_t Ibeta,
- float32_t * pId,
- float32_t * pIq,
- float32_t sinVal,
- float32_t cosVal)
- {
- /* Calculate pId using the equation, pId = Ialpha * cosVal + Ibeta * sinVal */
- *pId = Ialpha * cosVal + Ibeta * sinVal;
-
- /* Calculate pIq using the equation, pIq = - Ialpha * sinVal + Ibeta * cosVal */
- *pIq = -Ialpha * sinVal + Ibeta * cosVal;
-
- }
-
- /**
- * @brief Park transform for Q31 version
- * @param[in] Ialpha input two-phase vector coordinate alpha
- * @param[in] Ibeta input two-phase vector coordinate beta
- * @param[out] *pId points to output rotor reference frame d
- * @param[out] *pIq points to output rotor reference frame q
- * @param[in] sinVal sine value of rotation angle theta
- * @param[in] cosVal cosine value of rotation angle theta
- * @return none.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using an internal 32-bit accumulator.
- * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.
- * There is saturation on the addition and subtraction, hence there is no risk of overflow.
- */
-
-
- static __INLINE void arm_park_q31(
- q31_t Ialpha,
- q31_t Ibeta,
- q31_t * pId,
- q31_t * pIq,
- q31_t sinVal,
- q31_t cosVal)
- {
- q31_t product1, product2; /* Temporary variables used to store intermediate results */
- q31_t product3, product4; /* Temporary variables used to store intermediate results */
-
- /* Intermediate product is calculated by (Ialpha * cosVal) */
- product1 = (q31_t) (((q63_t) (Ialpha) * (cosVal)) >> 31);
-
- /* Intermediate product is calculated by (Ibeta * sinVal) */
- product2 = (q31_t) (((q63_t) (Ibeta) * (sinVal)) >> 31);
-
-
- /* Intermediate product is calculated by (Ialpha * sinVal) */
- product3 = (q31_t) (((q63_t) (Ialpha) * (sinVal)) >> 31);
-
- /* Intermediate product is calculated by (Ibeta * cosVal) */
- product4 = (q31_t) (((q63_t) (Ibeta) * (cosVal)) >> 31);
-
- /* Calculate pId by adding the two intermediate products 1 and 2 */
- *pId = __QADD(product1, product2);
-
- /* Calculate pIq by subtracting the two intermediate products 3 from 4 */
- *pIq = __QSUB(product4, product3);
- }
-
- /**
- * @} end of park group
- */
-
- /**
- * @brief Converts the elements of the Q7 vector to floating-point vector.
- * @param[in] *pSrc is input pointer
- * @param[out] *pDst is output pointer
- * @param[in] blockSize is the number of samples to process
- * @return none.
- */
- void arm_q7_to_float(
- q7_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
-
- /**
- * @ingroup groupController
- */
-
- /**
- * @defgroup inv_park Vector Inverse Park transform
- * Inverse Park transform converts the input flux and torque components to two-coordinate vector.
- *
- * The function operates on a single sample of data and each call to the function returns the processed output.
- * The library provides separate functions for Q31 and floating-point data types.
- * \par Algorithm
- * \image html parkInvFormula.gif
- * where pIalpha and pIbeta are the stator vector components,
- * Id and Iq are rotor vector components and cosVal and sinVal are the
- * cosine and sine values of theta (rotor flux position).
- * \par Fixed-Point Behavior
- * Care must be taken when using the Q31 version of the Park transform.
- * In particular, the overflow and saturation behavior of the accumulator used must be considered.
- * Refer to the function specific documentation below for usage guidelines.
- */
-
- /**
- * @addtogroup inv_park
- * @{
- */
-
- /**
- * @brief Floating-point Inverse Park transform
- * @param[in] Id input coordinate of rotor reference frame d
- * @param[in] Iq input coordinate of rotor reference frame q
- * @param[out] *pIalpha points to output two-phase orthogonal vector axis alpha
- * @param[out] *pIbeta points to output two-phase orthogonal vector axis beta
- * @param[in] sinVal sine value of rotation angle theta
- * @param[in] cosVal cosine value of rotation angle theta
- * @return none.
- */
-
- static __INLINE void arm_inv_park_f32(
- float32_t Id,
- float32_t Iq,
- float32_t * pIalpha,
- float32_t * pIbeta,
- float32_t sinVal,
- float32_t cosVal)
- {
- /* Calculate pIalpha using the equation, pIalpha = Id * cosVal - Iq * sinVal */
- *pIalpha = Id * cosVal - Iq * sinVal;
-
- /* Calculate pIbeta using the equation, pIbeta = Id * sinVal + Iq * cosVal */
- *pIbeta = Id * sinVal + Iq * cosVal;
-
- }
-
-
- /**
- * @brief Inverse Park transform for Q31 version
- * @param[in] Id input coordinate of rotor reference frame d
- * @param[in] Iq input coordinate of rotor reference frame q
- * @param[out] *pIalpha points to output two-phase orthogonal vector axis alpha
- * @param[out] *pIbeta points to output two-phase orthogonal vector axis beta
- * @param[in] sinVal sine value of rotation angle theta
- * @param[in] cosVal cosine value of rotation angle theta
- * @return none.
- *
- * Scaling and Overflow Behavior:
- * \par
- * The function is implemented using an internal 32-bit accumulator.
- * The accumulator maintains 1.31 format by truncating lower 31 bits of the intermediate multiplication in 2.62 format.
- * There is saturation on the addition, hence there is no risk of overflow.
- */
-
-
- static __INLINE void arm_inv_park_q31(
- q31_t Id,
- q31_t Iq,
- q31_t * pIalpha,
- q31_t * pIbeta,
- q31_t sinVal,
- q31_t cosVal)
- {
- q31_t product1, product2; /* Temporary variables used to store intermediate results */
- q31_t product3, product4; /* Temporary variables used to store intermediate results */
-
- /* Intermediate product is calculated by (Id * cosVal) */
- product1 = (q31_t) (((q63_t) (Id) * (cosVal)) >> 31);
-
- /* Intermediate product is calculated by (Iq * sinVal) */
- product2 = (q31_t) (((q63_t) (Iq) * (sinVal)) >> 31);
-
-
- /* Intermediate product is calculated by (Id * sinVal) */
- product3 = (q31_t) (((q63_t) (Id) * (sinVal)) >> 31);
-
- /* Intermediate product is calculated by (Iq * cosVal) */
- product4 = (q31_t) (((q63_t) (Iq) * (cosVal)) >> 31);
-
- /* Calculate pIalpha by using the two intermediate products 1 and 2 */
- *pIalpha = __QSUB(product1, product2);
-
- /* Calculate pIbeta by using the two intermediate products 3 and 4 */
- *pIbeta = __QADD(product4, product3);
-
- }
-
- /**
- * @} end of Inverse park group
- */
-
-
- /**
- * @brief Converts the elements of the Q31 vector to floating-point vector.
- * @param[in] *pSrc is input pointer
- * @param[out] *pDst is output pointer
- * @param[in] blockSize is the number of samples to process
- * @return none.
- */
- void arm_q31_to_float(
- q31_t * pSrc,
- float32_t * pDst,
- uint32_t blockSize);
-
- /**
- * @ingroup groupInterpolation
- */
-
- /**
- * @defgroup LinearInterpolate Linear Interpolation
- *
- * Linear interpolation is a method of curve fitting using linear polynomials.
- * Linear interpolation works by effectively drawing a straight line between two neighboring samples and returning the appropriate point along that line
- *
- * \par
- * \image html LinearInterp.gif "Linear interpolation"
- *
- * \par
- * A Linear Interpolate function calculates an output value(y), for the input(x)
- * using linear interpolation of the input values x0, x1( nearest input values) and the output values y0 and y1(nearest output values)
- *
- * \par Algorithm:
- *
- * y = y0 + (x - x0) * ((y1 - y0)/(x1-x0))
- * where x0, x1 are nearest values of input x
- * y0, y1 are nearest values to output y
- *
- *
- * \par
- * This set of functions implements Linear interpolation process
- * for Q7, Q15, Q31, and floating-point data types. The functions operate on a single
- * sample of data and each call to the function returns a single processed value.
- * S points to an instance of the Linear Interpolate function data structure.
- * x is the input sample value. The functions returns the output value.
- *
- * \par
- * if x is outside of the table boundary, Linear interpolation returns first value of the table
- * if x is below input range and returns last value of table if x is above range.
- */
-
- /**
- * @addtogroup LinearInterpolate
- * @{
- */
-
- /**
- * @brief Process function for the floating-point Linear Interpolation Function.
- * @param[in,out] *S is an instance of the floating-point Linear Interpolation structure
- * @param[in] x input sample to process
- * @return y processed output sample.
- *
- */
-
- static __INLINE float32_t arm_linear_interp_f32(
- arm_linear_interp_instance_f32 * S,
- float32_t x)
- {
-
- float32_t y;
- float32_t x0, x1; /* Nearest input values */
- float32_t y0, y1; /* Nearest output values */
- float32_t xSpacing = S->xSpacing; /* spacing between input values */
- int32_t i; /* Index variable */
- float32_t *pYData = S->pYData; /* pointer to output table */
-
- /* Calculation of index */
- i = (int32_t) ((x - S->x1) / xSpacing);
-
- if(i < 0)
- {
- /* Iniatilize output for below specified range as least output value of table */
- y = pYData[0];
- }
- else if((uint32_t)i >= S->nValues)
- {
- /* Iniatilize output for above specified range as last output value of table */
- y = pYData[S->nValues - 1];
- }
- else
- {
- /* Calculation of nearest input values */
- x0 = S->x1 + i * xSpacing;
- x1 = S->x1 + (i + 1) * xSpacing;
-
- /* Read of nearest output values */
- y0 = pYData[i];
- y1 = pYData[i + 1];
-
- /* Calculation of output */
- y = y0 + (x - x0) * ((y1 - y0) / (x1 - x0));
-
- }
-
- /* returns output value */
- return (y);
- }
-
- /**
- *
- * @brief Process function for the Q31 Linear Interpolation Function.
- * @param[in] *pYData pointer to Q31 Linear Interpolation table
- * @param[in] x input sample to process
- * @param[in] nValues number of table values
- * @return y processed output sample.
- *
- * \par
- * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.
- * This function can support maximum of table size 2^12.
- *
- */
-
-
- static __INLINE q31_t arm_linear_interp_q31(
- q31_t * pYData,
- q31_t x,
- uint32_t nValues)
- {
- q31_t y; /* output */
- q31_t y0, y1; /* Nearest output values */
- q31_t fract; /* fractional part */
- int32_t index; /* Index to read nearest output values */
-
- /* Input is in 12.20 format */
- /* 12 bits for the table index */
- /* Index value calculation */
- index = ((x & 0xFFF00000) >> 20);
-
- if(index >= (int32_t)(nValues - 1))
- {
- return (pYData[nValues - 1]);
- }
- else if(index < 0)
- {
- return (pYData[0]);
- }
- else
- {
-
- /* 20 bits for the fractional part */
- /* shift left by 11 to keep fract in 1.31 format */
- fract = (x & 0x000FFFFF) << 11;
-
- /* Read two nearest output values from the index in 1.31(q31) format */
- y0 = pYData[index];
- y1 = pYData[index + 1u];
-
- /* Calculation of y0 * (1-fract) and y is in 2.30 format */
- y = ((q31_t) ((q63_t) y0 * (0x7FFFFFFF - fract) >> 32));
-
- /* Calculation of y0 * (1-fract) + y1 *fract and y is in 2.30 format */
- y += ((q31_t) (((q63_t) y1 * fract) >> 32));
-
- /* Convert y to 1.31 format */
- return (y << 1u);
-
- }
-
- }
-
- /**
- *
- * @brief Process function for the Q15 Linear Interpolation Function.
- * @param[in] *pYData pointer to Q15 Linear Interpolation table
- * @param[in] x input sample to process
- * @param[in] nValues number of table values
- * @return y processed output sample.
- *
- * \par
- * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.
- * This function can support maximum of table size 2^12.
- *
- */
-
-
- static __INLINE q15_t arm_linear_interp_q15(
- q15_t * pYData,
- q31_t x,
- uint32_t nValues)
- {
- q63_t y; /* output */
- q15_t y0, y1; /* Nearest output values */
- q31_t fract; /* fractional part */
- int32_t index; /* Index to read nearest output values */
-
- /* Input is in 12.20 format */
- /* 12 bits for the table index */
- /* Index value calculation */
- index = ((x & 0xFFF00000) >> 20u);
-
- if(index >= (int32_t)(nValues - 1))
- {
- return (pYData[nValues - 1]);
- }
- else if(index < 0)
- {
- return (pYData[0]);
- }
- else
- {
- /* 20 bits for the fractional part */
- /* fract is in 12.20 format */
- fract = (x & 0x000FFFFF);
-
- /* Read two nearest output values from the index */
- y0 = pYData[index];
- y1 = pYData[index + 1u];
-
- /* Calculation of y0 * (1-fract) and y is in 13.35 format */
- y = ((q63_t) y0 * (0xFFFFF - fract));
-
- /* Calculation of (y0 * (1-fract) + y1 * fract) and y is in 13.35 format */
- y += ((q63_t) y1 * (fract));
-
- /* convert y to 1.15 format */
- return (y >> 20);
- }
-
-
- }
-
- /**
- *
- * @brief Process function for the Q7 Linear Interpolation Function.
- * @param[in] *pYData pointer to Q7 Linear Interpolation table
- * @param[in] x input sample to process
- * @param[in] nValues number of table values
- * @return y processed output sample.
- *
- * \par
- * Input sample x is in 12.20 format which contains 12 bits for table index and 20 bits for fractional part.
- * This function can support maximum of table size 2^12.
- */
-
-
- static __INLINE q7_t arm_linear_interp_q7(
- q7_t * pYData,
- q31_t x,
- uint32_t nValues)
- {
- q31_t y; /* output */
- q7_t y0, y1; /* Nearest output values */
- q31_t fract; /* fractional part */
- uint32_t index; /* Index to read nearest output values */
-
- /* Input is in 12.20 format */
- /* 12 bits for the table index */
- /* Index value calculation */
- if (x < 0)
- {
- return (pYData[0]);
- }
- index = (x >> 20) & 0xfff;
-
-
- if(index >= (nValues - 1))
- {
- return (pYData[nValues - 1]);
- }
- else
- {
-
- /* 20 bits for the fractional part */
- /* fract is in 12.20 format */
- fract = (x & 0x000FFFFF);
-
- /* Read two nearest output values from the index and are in 1.7(q7) format */
- y0 = pYData[index];
- y1 = pYData[index + 1u];
-
- /* Calculation of y0 * (1-fract ) and y is in 13.27(q27) format */
- y = ((y0 * (0xFFFFF - fract)));
-
- /* Calculation of y1 * fract + y0 * (1-fract) and y is in 13.27(q27) format */
- y += (y1 * fract);
-
- /* convert y to 1.7(q7) format */
- return (y >> 20u);
-
- }
-
- }
- /**
- * @} end of LinearInterpolate group
- */
-
- /**
- * @brief Fast approximation to the trigonometric sine function for floating-point data.
- * @param[in] x input value in radians.
- * @return sin(x).
- */
-
- float32_t arm_sin_f32(
- float32_t x);
-
- /**
- * @brief Fast approximation to the trigonometric sine function for Q31 data.
- * @param[in] x Scaled input value in radians.
- * @return sin(x).
- */
-
- q31_t arm_sin_q31(
- q31_t x);
-
- /**
- * @brief Fast approximation to the trigonometric sine function for Q15 data.
- * @param[in] x Scaled input value in radians.
- * @return sin(x).
- */
-
- q15_t arm_sin_q15(
- q15_t x);
-
- /**
- * @brief Fast approximation to the trigonometric cosine function for floating-point data.
- * @param[in] x input value in radians.
- * @return cos(x).
- */
-
- float32_t arm_cos_f32(
- float32_t x);
-
- /**
- * @brief Fast approximation to the trigonometric cosine function for Q31 data.
- * @param[in] x Scaled input value in radians.
- * @return cos(x).
- */
-
- q31_t arm_cos_q31(
- q31_t x);
-
- /**
- * @brief Fast approximation to the trigonometric cosine function for Q15 data.
- * @param[in] x Scaled input value in radians.
- * @return cos(x).
- */
-
- q15_t arm_cos_q15(
- q15_t x);
-
-
- /**
- * @ingroup groupFastMath
- */
-
-
- /**
- * @defgroup SQRT Square Root
- *
- * Computes the square root of a number.
- * There are separate functions for Q15, Q31, and floating-point data types.
- * The square root function is computed using the Newton-Raphson algorithm.
- * This is an iterative algorithm of the form:
- *
- * x1 = x0 - f(x0)/f'(x0)
- *
- * where x1 is the current estimate,
- * x0 is the previous estimate, and
- * f'(x0) is the derivative of f() evaluated at x0.
- * For the square root function, the algorithm reduces to:
- *
- *
- * \par
- * where numRows specifies the number of rows in the table;
- * numCols specifies the number of columns in the table;
- * and pData points to an array of size numRows*numCols values.
- * The data table pTable is organized in row order and the supplied data values fall on integer indexes.
- * That is, table element (x,y) is located at pTable[x + y*numCols] where x and y are integers.
- *
- * \par
- * Let (x, y) specify the desired interpolation point. Then define:
- *
- * XF = floor(x)
- * YF = floor(y)
- *
- * \par
- * The interpolated output point is computed as:
- *
- *
- * For more accurate information, please look at the DMAC section of the
- * Datasheet.
- *
- * \sa \ref dmad_module
- *
- * Related files :\n
- * \ref dmac.c\n
- * \ref dmac.h.\n
- *
- */
-
-#ifndef DMAC_H
-#define DMAC_H
-/**@{*/
-
-/*------------------------------------------------------------------------------
- * Headers
- *----------------------------------------------------------------------------*/
-
-#include "chip.h"
-
-#include
-#include
-
-/*------------------------------------------------------------------------------
- * Definitions
- *----------------------------------------------------------------------------*/
-
-/** \addtogroup dmac_defines DMAC Definitions
- * @{
- */
-/** Number of DMA channels */
-#define XDMAC_CONTROLLER_NUM 1
-/** Number of DMA channels */
-#define XDMAC_CHANNEL_NUM 24
-/** Max DMA single transfer size */
-#define XDMAC_MAX_BT_SIZE 0xFFFF
-/** @}*/
-
-/*----------------------------------------------------------------------------
- * Macro
- *----------------------------------------------------------------------------*/
-#define XDMA_GET_DATASIZE(size) ((size==0)? XDMAC_CC_DWIDTH_BYTE : \
- ((size==1)? XDMAC_CC_DWIDTH_HALFWORD : \
- (XDMAC_CC_DWIDTH_WORD )))
-#define XDMA_GET_CC_SAM(s) ((s==0)? XDMAC_CC_SAM_FIXED_AM : \
- ((s==1)? XDMAC_CC_SAM_INCREMENTED_AM : \
- ((s==2)? XDMAC_CC_SAM_UBS_AM : \
- XDMAC_CC_SAM_UBS_DS_AM )))
-#define XDMA_GET_CC_DAM(d) ((d==0)? XDMAC_CC_DAM_FIXED_AM : \
- ((d==1)? XDMAC_CC_DAM_INCREMENTED_AM : \
- ((d==2)? XDMAC_CC_DAM_UBS_AM : \
- XDMAC_CC_DAM_UBS_DS_AM )))
-#define XDMA_GET_CC_MEMSET(m) ((m==0)? XDMAC_CC_MEMSET_NORMAL_MODE : \
- XDMAC_CC_MEMSET_HW_MODE)
-
-/*------------------------------------------------------------------------------
- * Global functions
- *----------------------------------------------------------------------------*/
-/** \addtogroup dmac_functions
- * @{
- */
-
-#ifdef __cplusplus
- extern "C" {
-#endif
-
-extern uint32_t XDMAC_GetType( Xdmac *pXdmac);
-extern uint32_t XDMAC_GetConfig( Xdmac *pXdmac);
-extern uint32_t XDMAC_GetArbiter( Xdmac *pXdmac);
-extern void XDMAC_EnableGIt (Xdmac *pXdmac, uint8_t dwInteruptMask );
-extern void XDMAC_DisableGIt (Xdmac *pXdmac, uint8_t dwInteruptMask );
-extern uint32_t XDMAC_GetGItMask( Xdmac *pXdmac );
-extern uint32_t XDMAC_GetGIsr( Xdmac *pXdmac );
-extern uint32_t XDMAC_GetMaskedGIsr( Xdmac *pXdmac );
-extern void XDMAC_EnableChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_EnableChannels( Xdmac *pXdmac, uint32_t bmChannels );
-extern void XDMAC_DisableChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_DisableChannels( Xdmac *pXdmac, uint32_t bmChannels );
-extern uint32_t XDMAC_GetGlobalChStatus(Xdmac *pXdmac);
-extern void XDMAC_SuspendReadChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_SuspendWriteChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_SuspendReadWriteChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_ResumeReadWriteChannel( Xdmac *pXdmac, uint8_t channel );
-extern void XDMAC_SoftwareTransferReq(Xdmac *pXdmac, uint8_t channel);
-extern uint32_t XDMAC_GetSoftwareTransferStatus(Xdmac *pXdmac);
-extern void XDMAC_SoftwareFlushReq(Xdmac *pXdmac, uint8_t channel);
-extern void XDMAC_EnableChannelIt (Xdmac *pXdmac, uint8_t channel,
- uint8_t dwInteruptMask );
-extern void XDMAC_DisableChannelIt (Xdmac *pXdmac, uint8_t channel,
- uint8_t dwInteruptMask );
-extern uint32_t XDMAC_GetChannelItMask (Xdmac *pXdmac, uint8_t channel);
-extern uint32_t XDMAC_GetChannelIsr (Xdmac *pXdmac, uint8_t channel);
-extern uint32_t XDMAC_GetMaskChannelIsr (Xdmac *pXdmac, uint8_t channel);
-extern void XDMAC_SetSourceAddr(Xdmac *pXdmac, uint8_t channel, uint32_t addr);
-extern void XDMAC_SetDestinationAddr(Xdmac *pXdmac, uint8_t channel,
- uint32_t addr);
-extern void XDMAC_SetDescriptorAddr(Xdmac *pXdmac, uint8_t channel,
- uint32_t addr, uint8_t ndaif);
-extern void XDMAC_SetDescriptorControl(Xdmac *pXdmac, uint8_t channel,
- uint8_t config);
-extern void XDMAC_SetMicroblockControl(Xdmac *pXdmac, uint8_t channel,
- uint32_t ublen);
-extern void XDMAC_SetBlockControl(Xdmac *pXdmac, uint8_t channel,
- uint16_t blen);
-extern void XDMAC_SetChannelConfig(Xdmac *pXdmac, uint8_t channel,
- uint32_t config);
-extern uint32_t XDMAC_GetChannelConfig(Xdmac *pXdmac, uint8_t channel);
-extern void XDMAC_SetDataStride_MemPattern(Xdmac *pXdmac, uint8_t channel,
- uint32_t dds_msp);
-extern void XDMAC_SetSourceMicroBlockStride(Xdmac *pXdmac, uint8_t channel,
- uint32_t subs);
-extern void XDMAC_SetDestinationMicroBlockStride(Xdmac *pXdmac, uint8_t channel,
- uint32_t dubs);
-extern uint32_t XDMAC_GetChDestinationAddr(Xdmac *pXdmac, uint8_t channel);
-#ifdef __cplusplus
-}
-#endif
-
-/** @}*/
-/**@}*/
-#endif //#ifndef DMAC_H
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/xdmad.h b/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/xdmad.h
deleted file mode 100644
index 877f20e7..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/libchip_samv7/include/xdmad.h
+++ /dev/null
@@ -1,260 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2013, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef _XDMAD_H
-#define _XDMAD_H
-
-
-/*----------------------------------------------------------------------------
- * Includes
- *----------------------------------------------------------------------------*/
-
-#include "chip.h"
-#include
-
-
-/** \addtogroup dmad_defines DMA Driver Defines
- @{*/
-/*----------------------------------------------------------------------------
- * Consts
- *----------------------------------------------------------------------------*/
-#define XDMAD_TRANSFER_MEMORY 0xFF /**< DMA transfer from or to memory */
-#define XDMAD_ALLOC_FAILED 0xFFFF /**< Channel allocate failed */
-
-#define XDMAD_TRANSFER_TX 0
-#define XDMAD_TRANSFER_RX 1
-
-/* XDMA_MBR_UBC */
-#define XDMA_UBC_NDE (0x1u << 24)
-#define XDMA_UBC_NDE_FETCH_DIS (0x0u << 24)
-#define XDMA_UBC_NDE_FETCH_EN (0x1u << 24)
-#define XDMA_UBC_NSEN (0x1u << 25)
-#define XDMA_UBC_NSEN_UNCHANGED (0x0u << 25)
-#define XDMA_UBC_NSEN_UPDATED (0x1u << 25)
-#define XDMA_UBC_NDEN (0x1u << 26)
-#define XDMA_UBC_NDEN_UNCHANGED (0x0u << 26)
-#define XDMA_UBC_NDEN_UPDATED (0x1u << 26)
-#define XDMA_UBC_NVIEW_Pos 27
-#define XDMA_UBC_NVIEW_Msk (0x3u << XDMA_UBC_NVIEW_Pos)
-#define XDMA_UBC_NVIEW_NDV0 (0x0u << XDMA_UBC_NVIEW_Pos)
-#define XDMA_UBC_NVIEW_NDV1 (0x1u << XDMA_UBC_NVIEW_Pos)
-#define XDMA_UBC_NVIEW_NDV2 (0x2u << XDMA_UBC_NVIEW_Pos)
-#define XDMA_UBC_NVIEW_NDV3 (0x3u << XDMA_UBC_NVIEW_Pos)
-
-/*----------------------------------------------------------------------------
- * MACRO
- *----------------------------------------------------------------------------*/
-
-/** @}*/
-
-/*----------------------------------------------------------------------------
- * Types
- *----------------------------------------------------------------------------*/
-/** \addtogroup dmad_structs DMA Driver Structs
- @{*/
-
-/** DMA status or return code */
-typedef enum _XdmadStatus {
- XDMAD_OK = 0, /**< Operation is successful */
- XDMAD_PARTIAL_DONE,
- XDMAD_DONE,
- XDMAD_BUSY, /**< Channel occupied or transfer not finished */
- XDMAD_ERROR, /**< Operation failed */
- XDMAD_CANCELED /**< Operation cancelled */
-} eXdmadStatus, eXdmadRC;
-
-/** DMA state for channel */
-typedef enum _XdmadState {
- XDMAD_STATE_FREE = 0, /**< Free channel */
- XDMAD_STATE_ALLOCATED, /**< Allocated to some peripheral */
- XDMAD_STATE_START, /**< DMA started */
- XDMAD_STATE_IN_XFR, /**< DMA in transferring */
- XDMAD_STATE_DONE, /**< DMA transfer done */
- XDMAD_STATE_HALTED, /**< DMA transfer stopped */
-} eXdmadState;
-
-/** DMA Programming state for channel */
-typedef enum _XdmadProgState {
- XDMAD_SINGLE= 0,
- XDMAD_MULTI,
- XDMAD_LLI,
-} eXdmadProgState;
-
-/** DMA transfer callback */
-typedef void (*XdmadTransferCallback)(uint32_t Channel, void* pArg);
-
-/** DMA driver channel */
-typedef struct _XdmadChannel {
- XdmadTransferCallback fCallback; /**< Callback */
- void* pArg; /**< Callback argument */
- uint8_t bIrqOwner; /**< Uses DMA handler or external one */
- uint8_t bSrcPeriphID; /**< HW ID for source */
- uint8_t bDstPeriphID; /**< HW ID for destination */
- uint8_t bSrcTxIfID; /**< DMA Tx Interface ID for source */
- uint8_t bSrcRxIfID; /**< DMA Rx Interface ID for source */
- uint8_t bDstTxIfID; /**< DMA Tx Interface ID for destination */
- uint8_t bDstRxIfID; /**< DMA Rx Interface ID for destination */
- volatile uint8_t state; /**< DMA channel state */
-} sXdmadChannel;
-
-/** DMA driver instance */
-typedef struct _Xdmad {
- Xdmac *pXdmacs;
- sXdmadChannel XdmaChannels[XDMACCHID_NUMBER];
- uint8_t numControllers;
- uint8_t numChannels;
- uint8_t pollingMode;
- uint8_t pollingTimeout;
- uint8_t xdmaMutex;
-} sXdmad;
-
-typedef struct _XdmadCfg {
- /** Microblock Control Member. */
- uint32_t mbr_ubc;
- /** Source Address Member. */
- uint32_t mbr_sa;
- /** Destination Address Member. */
- uint32_t mbr_da;
- /** Configuration Register. */
- uint32_t mbr_cfg;
- /** Block Control Member. */
- uint32_t mbr_bc;
- /** Data Stride Member. */
- uint32_t mbr_ds;
- /** Source Microblock Stride Member. */
- uint32_t mbr_sus;
- /** Destination Microblock Stride Member. */
- uint32_t mbr_dus;
-} sXdmadCfg;
-
-/** \brief Structure for storing parameters for DMA view0 that can be
- * performed by the DMA Master transfer.*/
-typedef struct _LinkedListDescriporView0
-{
- /** Next Descriptor Address number. */
- uint32_t mbr_nda;
- /** Microblock Control Member. */
- uint32_t mbr_ubc;
- /** Transfer Address Member. */
- uint32_t mbr_ta;
-}LinkedListDescriporView0;
-
-/** \brief Structure for storing parameters for DMA view1 that can be
- * performed by the DMA Master transfer.*/
-typedef struct _LinkedListDescriporView1
-{
- /** Next Descriptor Address number. */
- uint32_t mbr_nda;
- /** Microblock Control Member. */
- uint32_t mbr_ubc;
- /** Source Address Member. */
- uint32_t mbr_sa;
- /** Destination Address Member. */
- uint32_t mbr_da;
-}LinkedListDescriporView1;
-
-/** \brief Structure for storing parameters for DMA view2 that can be
- * performed by the DMA Master transfer.*/
-typedef struct _LinkedListDescriporView2
-{
- /** Next Descriptor Address number. */
- uint32_t mbr_nda;
- /** Microblock Control Member. */
- uint32_t mbr_ubc;
- /** Source Address Member. */
- uint32_t mbr_sa;
- /** Destination Address Member. */
- uint32_t mbr_da;
- /** Configuration Register. */
- uint32_t mbr_cfg;
-}LinkedListDescriporView2;
-
-/** \brief Structure for storing parameters for DMA view3 that can be
- * performed by the DMA Master transfer.*/
-typedef struct _LinkedListDescriporView3
-{
- /** Next Descriptor Address number. */
- uint32_t mbr_nda;
- /** Microblock Control Member. */
- uint32_t mbr_ubc;
- /** Source Address Member. */
- uint32_t mbr_sa;
- /** Destination Address Member. */
- uint32_t mbr_da;
- /** Configuration Register. */
- uint32_t mbr_cfg;
- /** Block Control Member. */
- uint32_t mbr_bc;
- /** Data Stride Member. */
- uint32_t mbr_ds;
- /** Source Microblock Stride Member. */
- uint32_t mbr_sus;
- /** Destination Microblock Stride Member. */
- uint32_t mbr_dus;
-}LinkedListDescriporView3;
-
-/** @}*/
-
-/*----------------------------------------------------------------------------
- * Exported functions
- *----------------------------------------------------------------------------*/
-/** \addtogroup dmad_functions DMA Driver Functions
- @{*/
-extern void XDMAD_Initialize( sXdmad *pXdmad,
- uint8_t bPollingMode );
-
-extern void XDMAD_Handler( sXdmad *pDmad);
-
-extern uint32_t XDMAD_AllocateChannel( sXdmad *pXdmad,
- uint8_t bSrcID, uint8_t bDstID);
-extern eXdmadRC XDMAD_FreeChannel( sXdmad *pXdmad, uint32_t dwChannel );
-
-extern eXdmadRC XDMAD_ConfigureTransfer( sXdmad *pXdmad,
- uint32_t dwChannel,
- sXdmadCfg *pXdmaParam,
- uint32_t dwXdmaDescCfg,
- uint32_t dwXdmaDescAddr,
- uint32_t dwXdmaIntEn);
-
-extern eXdmadRC XDMAD_PrepareChannel( sXdmad *pXdmad, uint32_t dwChannel);
-
-extern eXdmadRC XDMAD_IsTransferDone( sXdmad *pXdmad, uint32_t dwChannel );
-
-extern eXdmadRC XDMAD_StartTransfer( sXdmad *pXdmad, uint32_t dwChannel );
-
-extern eXdmadRC XDMAD_SetCallback( sXdmad *pXdmad,
- uint32_t dwChannel,
- XdmadTransferCallback fCallback,
- void* pArg );
-
-extern eXdmadRC XDMAD_StopTransfer( sXdmad *pXdmad, uint32_t dwChannel );
-/** @}*/
-/**@}*/
-#endif //#ifndef _XDMAD_H
-
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/libraries.a b/ports_module/cortex_m7/iar/example_build/libraries/libraries.a
deleted file mode 100644
index 1e67e315..00000000
Binary files a/ports_module/cortex_m7/iar/example_build/libraries/libraries.a and /dev/null differ
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/utils/md5/md5.h b/ports_module/cortex_m7/iar/example_build/libraries/utils/md5/md5.h
deleted file mode 100644
index 698c995d..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/utils/md5/md5.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved.
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- L. Peter Deutsch
- ghost@aladdin.com
-
- */
-/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */
-/*
- Independent implementation of MD5 (RFC 1321).
-
- This code implements the MD5 Algorithm defined in RFC 1321, whose
- text is available at
- http://www.ietf.org/rfc/rfc1321.txt
- The code is derived from the text of the RFC, including the test suite
- (section A.5) but excluding the rest of Appendix A. It does not include
- any code or documentation that is identified in the RFC as being
- copyrighted.
-
- The original and principal author of md5.h is L. Peter Deutsch
- . Other authors are noted in the change history
- that follows (in reverse chronological order):
-
- 2002-04-13 lpd Removed support for non-ANSI compilers; removed
- references to Ghostscript; clarified derivation from RFC 1321;
- now handles byte order either statically or dynamically.
- 1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
- 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5);
- added conditionalization for C++ compilation from Martin
- Purschke .
- 1999-05-03 lpd Original version.
- */
-
-#ifndef md5_INCLUDED
-# define md5_INCLUDED
-
-/*
- * This package supports both compile-time and run-time determination of CPU
- * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be
- * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is
- * defined as non-zero, the code will be compiled to run only on big-endian
- * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to
- * run on either big- or little-endian CPUs, but will run slightly less
- * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined.
- */
-
-typedef unsigned char md5_byte_t; /* 8-bit byte */
-typedef unsigned int md5_word_t; /* 32-bit word */
-
-/* Define the state of the MD5 Algorithm. */
-typedef struct md5_state_s {
- md5_word_t count[2]; /* message length in bits, lsw first */
- md5_word_t abcd[4]; /* digest buffer */
- md5_byte_t buf[64]; /* accumulate block */
-} md5_state_t;
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-/* Initialize the algorithm. */
-void md5_init(md5_state_t *pms);
-
-/* Append a string to the message. */
-void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes);
-
-/* Finish the message and return the digest. */
-void md5_finish(md5_state_t *pms, md5_byte_t digest[16]);
-
-#ifdef __cplusplus
-} /* end extern "C" */
-#endif
-
-#endif /* md5_INCLUDED */
diff --git a/ports_module/cortex_m7/iar/example_build/libraries/utils/utility.h b/ports_module/cortex_m7/iar/example_build/libraries/utils/utility.h
deleted file mode 100644
index e695376a..00000000
--- a/ports_module/cortex_m7/iar/example_build/libraries/utils/utility.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* ----------------------------------------------------------------------------
- * SAM Software Package License
- * ----------------------------------------------------------------------------
- * Copyright (c) 2014, Atmel Corporation
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * - Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the disclaimer below.
- *
- * Atmel's name may not be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
- * DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
- * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * ----------------------------------------------------------------------------
- */
-
-#ifndef UTILITY_H
-#define UTILITY_H
-
-#include "chip.h"
-
-
-
-#define RESET_CYCLE_COUNTER() do { \
- CoreDebug->DEMCR = CoreDebug_DEMCR_TRCENA_Msk; \
- __DSB(); DWT->LAR = 0xC5ACCE55; __DSB(); \
- DWT->CTRL &= ~DWT_CTRL_CYCCNTENA_Msk; \
- DWT->CYCCNT = 0; \
- DWT->CTRL = DWT_CTRL_CYCCNTENA_Msk; \
- }while(0)
-
-#define GET_CYCLE_COUNTER(x) x=DWT->CYCCNT;
-
-#define LockMutex(mut, timeout) get_lock(&mut, 1, &timeout)
-
-#define ReleaseMutex(mut) free_lock(&mut)
-
-#define GetResource(mut, max, timeout) get_lock(&mut, max, &timeout)
-
-#define FreeResource(mut) free_lock(&mut)
-
-
-__STATIC_INLINE uint8_t Is_LockFree(volatile uint8_t *Lock_Variable)
-{
- /* return Variable value*/
- return __LDREXB(Lock_Variable);
-
-}
-
-__STATIC_INLINE uint8_t get_lock(volatile uint8_t *Lock_Variable, const uint8_t maxValue, volatile uint32_t *pTimeout)
-{
- while (*pTimeout)
- {
- if(__LDREXB(Lock_Variable) < maxValue)
- {
- /* Set the Variable */
- while( __STREXB(((*Lock_Variable) + 1), Lock_Variable) )
- {
- if(!(*pTimeout)--)
- {
- return 1; // quit if timeout
- }
- }
- /* Memory access barrier */
- __DMB();
- TRACE_DEBUG("Mutex locked ");
- return 0;
- }
-
- ((*pTimeout)--);
- }
- return 1;
-}
-
-
-
-__STATIC_INLINE uint8_t free_lock(volatile uint8_t *Lock_Variable)
-{
- /* Memory access barrier Ensure memory operations completed before releasing lock */
- __DSB();
- if(__LDREXB(Lock_Variable))
- {
- __STREXB( ((*Lock_Variable) - 1), Lock_Variable);
- TRACE_DEBUG("Mutex freed ");
- __DSB();
- __DMB(); // Ensure memory operations completed before
- return 0;
- }
- else
- {
- return 1;
- }
-
-
-}
-
-
-#endif /* UTILITY_H */
diff --git a/ports_module/cortex_m7/iar/inc/tx_port.h b/ports_module/cortex_m7/iar/inc/tx_port.h
index 4bcad5d2..70975f91 100644
--- a/ports_module/cortex_m7/iar/inc/tx_port.h
+++ b/ports_module/cortex_m7/iar/inc/tx_port.h
@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_port.h Cortex-M7/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* */
/* AUTHOR */
/* */
@@ -48,6 +48,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Modified comments and added */
+/* volatile to registers, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
@@ -127,14 +130,14 @@ typedef unsigned short USHORT;
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
#define TX_TRACE_TIME_MASK 0x0000FFFFUL
*/
#ifndef TX_MISRA_ENABLE
#ifndef TX_TRACE_TIME_SOURCE
-#define TX_TRACE_TIME_SOURCE *((ULONG *) 0xE0001004)
+#define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
#endif
#else
ULONG _tx_misra_time_stamp_get(VOID);
@@ -325,7 +328,7 @@ void _tx_misra_vfp_touch(void);
else \
{ \
ULONG _tx_fpccr; \
- _tx_fpccr = *((ULONG *) 0xE000EF34); \
+ _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
_tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
if (_tx_fpccr == ((ULONG) 0x01)) \
{ \
@@ -476,7 +479,7 @@ static void _tx_thread_system_return_inline(void)
__istate_t interrupt_save;
/* Set PendSV to invoke ThreadX scheduler. */
- *((ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
+ *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
if (__get_IPSR() == 0)
{
interrupt_save = __get_interrupt_state();
diff --git a/ports_module/cortex_m7/iar/module_manager/src/tx_thread_schedule.s b/ports_module/cortex_m7/iar/module_manager/src/tx_thread_schedule.s
index 9162c7ab..2d66551f 100644
--- a/ports_module/cortex_m7/iar/module_manager/src/tx_thread_schedule.s
+++ b/ports_module/cortex_m7/iar/module_manager/src/tx_thread_schedule.s
@@ -36,7 +36,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_schedule Cortex-M7/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* Scott Larson, Microsoft Corporation */
@@ -63,13 +63,15 @@
/* */
/* _tx_initialize_kernel_enter ThreadX entry function */
/* _tx_thread_system_return Return to system from thread */
-/* _tx_thread_context_restore Restore thread's context */
/* */
/* RELEASE HISTORY */
/* */
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 Scott Larson Initial Version 6.1.9 */
+/* 04-25-2022 Scott Larson Optimized MPU configuration, */
+/* added BASEPRI support, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_schedule(VOID)
@@ -125,8 +127,12 @@ __tx_wait_here:
MemManage_Handler:
BusFault_Handler:
UsageFault_Handler:
-
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
/* Now pickup and store all the fault related information. */
@@ -209,7 +215,12 @@ UsageFault_Handler:
LDR r1, =0x10000000 // Set PENDSVSET bit
STR r1, [r0] // Store ICSR
DSB // Wait for memory access to complete
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
+#endif
MOV lr, #0xFFFFFFFD // Load exception return code
BX lr // Return from exception
@@ -227,12 +238,22 @@ __tx_ts_handler:
#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the thread exit function to indicate the thread is no longer executing. */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif /* TX_PORT_USE_BASEPRI */
PUSH {r0, lr} // Save LR (and r0 just for alignment)
BL _tx_execution_thread_exit // Call the thread exit function
POP {r0, lr} // Recover LR
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r0, 0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r0
+#else
CPSIE i // Enable interrupts
-#endif
+#endif /* TX_PORT_USE_BASEPRI */
+#endif /* EXECUTION PROFILE */
LDR r0, =_tx_thread_current_ptr // Build current thread pointer address
LDR r2, =_tx_thread_execute_ptr // Build execute thread pointer address
@@ -277,7 +298,12 @@ __tx_ts_new:
/* Now we are looking for a new thread to execute! */
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Is there another thread ready to execute?
CBNZ r1, __tx_ts_restore // Yes, schedule it
@@ -286,7 +312,12 @@ __tx_ts_new:
are disabled to allow use of WFI for waiting for a thread to arrive. */
__tx_ts_wait:
+#ifdef TX_PORT_USE_BASEPRI
+ LDR r1, =TX_PORT_BASEPRI // Mask interrupt priorities =< TX_PORT_BASEPRI
+ MSR BASEPRI, r1
+#else
CPSID i // Disable interrupts
+#endif
LDR r1, [r2] // Pickup the next thread to execute pointer
CBNZ r1, __tx_ts_ready // If non-NULL, a new thread is ready!
#ifdef TX_ENABLE_WFI
@@ -294,7 +325,12 @@ __tx_ts_wait:
WFI // Wait for interrupt
ISB // Ensure pipeline is flushed
#endif
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
B __tx_ts_wait // Loop to continue waiting
/* At this point, we have a new thread ready to go. Clear any newly pended PendSV - since we are
@@ -311,7 +347,12 @@ __tx_ts_restore:
and enable interrupts. */
STR r1, [r0] // Setup the current thread pointer to the new thread
+#ifdef TX_PORT_USE_BASEPRI
+ MOV r4, #0 // Disable BASEPRI masking (enable interrupts)
+ MSR BASEPRI, r4
+#else
CPSIE i // Enable interrupts
+#endif
/* Increment the thread run count. */
@@ -347,27 +388,34 @@ __tx_ts_restore:
STR r3, [r0] // Disable MPU
LDR r0, [r1, #0x90] // Pickup the module instance pointer
CBZ r0, skip_mpu_setup // Is this thread owned by a module? No, skip MPU setup
- LDR r1, [r0, #0x64] // Pickup MPU register[0]
- CBZ r1, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
- LDR r1, =0xE000ED9C // Build address of MPU base register
+
+ LDR r2, [r0, #0x8C] // Pickup MPU region 5 address
+ CBZ r2, skip_mpu_setup // Is protection required for this module? No, skip MPU setup
+
+ // Is the MPU already set up for this module?
+ MOV r1, #5 // Select region 5 from MPU
+ LDR r3, =0xE000ED98 // MPU_RNR register address
+ STR r1, [r3] // Set region to 5
+ LDR r1, =0xE000ED9C // MPU_RBAR register address
+ LDR r3, [r1] // Load address stored in MPU region 5
+ BIC r2, r2, #0x10 // Clear VALID bit
+ CMP r2, r3 // Is module already loaded?
+ BEQ _tx_enable_mpu // Yes - skip MPU reconfiguration
// Use alias registers to quickly load MPU
ADD r0, r0, #100 // Build address of MPU register start in thread control block
-#ifdef TXM_MODULE_MANAGER_16_MPU
+
LDM r0!,{r2-r9} // Load MPU regions 0-3
STM r1,{r2-r9} // Store MPU regions 0-3
LDM r0!,{r2-r9} // Load MPU regions 4-7
STM r1,{r2-r9} // Store MPU regions 4-7
+#ifdef TXM_MODULE_MANAGER_16_MPU
LDM r0!,{r2-r9} // Load MPU regions 8-11
STM r1,{r2-r9} // Store MPU regions 8-11
LDM r0,{r2-r9} // Load MPU regions 12-15
STM r1,{r2-r9} // Store MPU regions 12-15
-#else
- LDM r0!,{r2-r9} // Load first four MPU regions
- STM r1,{r2-r9} // Store first four MPU regions
- LDM r0,{r2-r9} // Load second four MPU regions
- STM r1,{r2-r9} // Store second four MPU regions
#endif
+_tx_enable_mpu:
LDR r0, =0xE000ED94 // Build MPU control reg address
MOV r1, #5 // Build enable value with background region enabled
STR r1, [r0] // Enable MPU
diff --git a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..80159f1b 100644
--- a/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a34_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..80159f1b 100644
--- a/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a34_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a35_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a35_smp/ac6/example_build/tx/.cproject
index 77ab8b02..b9781eea 100644
--- a/ports_smp/cortex_a35_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a35_smp/ac6/example_build/tx/.cproject
@@ -131,7 +131,7 @@
-
+
diff --git a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH // Ensure lock acquisition is visible before saving owning core
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH // Ensure ownership count update is visible to other cores
RET
diff --git a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a35_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH // Ensure lock acquisition is visible before saving owning core
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH // Ensure ownership count update is visible to other cores
RET
diff --git a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a35_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a53_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a53_smp/ac6/example_build/sample_threadx/.cproject
index eb3c1a32..195794e6 100644
--- a/ports_smp/cortex_a53_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a53_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a53_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a53_smp/ac6/example_build/tx/.cproject
index 527407ae..324bfa33 100644
--- a/ports_smp/cortex_a53_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a53_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a53_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a53_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a53_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a53_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a53_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a53_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a55_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a55_smp/ac6/example_build/sample_threadx/.cproject
index 5c025c1c..1413ef4e 100644
--- a/ports_smp/cortex_a55_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a55_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a55_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a55_smp/ac6/example_build/tx/.cproject
index 9cafc8db..5e360650 100644
--- a/ports_smp/cortex_a55_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a55_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a55_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a55_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a55_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a55_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a55_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a55_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a57_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a57_smp/ac6/example_build/sample_threadx/.cproject
index 2f75cb03..c5b82669 100644
--- a/ports_smp/cortex_a57_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a57_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a57_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a57_smp/ac6/example_build/tx/.cproject
index 324a0057..bf972591 100644
--- a/ports_smp/cortex_a57_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a57_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // If protection is free, attempt to get it
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a57_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a57_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a57_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a57_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a57_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // If protection is free, attempt to get it
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a57_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_context_restore.S
index 10c9b909..67aa5afb 100644
--- a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -79,7 +79,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_protect.S
index 3c03defd..26ad8d32 100644
--- a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Lock flag is 0 (free), so attempt to get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_unprotect.S
index 253ae916..30901452 100644
--- a/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a5x_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/AC6 */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_context_restore.S
index a604623e..06fbb655 100644
--- a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/GCC */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -79,7 +79,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_protect.S
index 09de5c57..f95083e3 100644
--- a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/GCC */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Lock flag is 0 (free), so attempt to get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_unprotect.S
index 936539be..b4f934ae 100644
--- a/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a5x_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -28,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/GCC */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -60,9 +60,12 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 10-15-2021 Andres Mlinar Updated comments, */
-/* resulting in version 6.1.9 */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 10-15-2021 Andres Mlinar Updated comments, */
+/* resulting in version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -104,11 +107,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_context_restore.S b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_context_restore.S
index 384c1ada..8aa5116b 100644
--- a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_context_restore.S
@@ -21,17 +21,6 @@
/**************************************************************************/
-/* #define TX_SOURCE_CODE */
-
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
EXTERN _tx_thread_system_state
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_execute_ptr
@@ -54,15 +43,12 @@
SECTION `.text`:CODE:NOROOT(3)
CODE
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore Cortex-A5x-SMP/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -95,10 +81,13 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
-/* VOID _tx_thread_context_restore(VOID)
-{ */
+// VOID _tx_thread_context_restore(VOID)
+// {
PUBLIC _tx_thread_context_restore
_tx_thread_context_restore:
@@ -106,7 +95,7 @@ _tx_thread_context_restore:
MSR DAIFSet, 0x3 // Lockout interrupts
-#ifdef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
+#if (defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE))
/* Call the ISR exit function to indicate an ISR is complete. */
@@ -132,8 +121,8 @@ _tx_thread_context_restore:
#endif
/* Determine if interrupts are nested. */
- /* if (--_tx_thread_system_state)
- { */
+ // if (--_tx_thread_system_state)
+ // {
LDR x3, =_tx_thread_system_state // Pickup address of system state var
LDR w2, [x3, x8, LSL #2] // Pickup system state
@@ -173,13 +162,13 @@ _tx_thread_context_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* } */
+ // }
__tx_thread_not_nested_restore:
/* Determine if a thread was interrupted and no preemption is required. */
- /* else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
- || (_tx_thread_preempt_disable))
- { */
+ // else if (((_tx_thread_current_ptr) && (_tx_thread_current_ptr == _tx_thread_execute_ptr)
+ // || (_tx_thread_preempt_disable))
+ // {
LDR x1, =_tx_thread_current_ptr // Pickup address of current thread ptr
LDR x0, [x1, x8, LSL #3] // Pickup actual current thread pointer
@@ -205,7 +194,7 @@ __tx_thread_no_preempt_restore:
/* Restore interrupted thread or ISR. */
/* Pickup the saved stack pointer. */
- /* sp = _tx_thread_current_ptr -> tx_thread_stack_ptr; */
+ // sp = _tx_thread_current_ptr -> tx_thread_stack_ptr;
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -238,80 +227,11 @@ __tx_thread_no_preempt_restore:
LDP x29, x30, [sp], #16 // Recover x29, x30
ERET // Return to point of interrupt
- /* }
- else
- { */
+ // }
+ // else
+ // {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- /* if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- { */
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- B.EQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- /* if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- { */
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- B.EQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- /* _tx_thread_smp_protect_wait_list_remove(this_core); */
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
-
- B _nobody_waiting_for_lock // Leave
-
- /* }
- else
- { */
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- /* _tx_thread_smp_protect_wait_counts[core]--; */
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- /* _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF; */
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- /* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0; */
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- /* }
- } */
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
@@ -358,27 +278,27 @@ _skip_fp_save:
/* Save the remaining time-slice and disable it. */
- /* if (_tx_timer_time_slice)
- { */
+ // if (_tx_timer_time_slice)
+ // {
LDR x3, =_tx_timer_time_slice // Pickup time-slice variable address
LDR w2, [x3, x8, LSL #2] // Pickup time-slice
CMP w2, #0 // Is it active?
B.EQ __tx_thread_dont_save_ts // No, don't save it
- /* _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
- _tx_timer_time_slice = 0; */
+ // _tx_thread_current_ptr -> tx_thread_time_slice = _tx_timer_time_slice;
+ // _tx_timer_time_slice = 0;
STR w2, [x0, #36] // Save thread's time-slice
MOV w2, #0 // Clear value
STR w2, [x3, x8, LSL #2] // Disable global time-slice flag
- /* } */
+ // }
__tx_thread_dont_save_ts:
/* Clear the current task pointer. */
- /* _tx_thread_current_ptr = TX_NULL; */
+ // _tx_thread_current_ptr = TX_NULL;
MOV x2, #0 // NULL value
STR x2, [x1, x8, LSL #3] // Clear current thread pointer
@@ -386,13 +306,13 @@ __tx_thread_dont_save_ts:
/* Set bit indicating this thread is ready for execution. */
MOV x2, #1 // Build ready flag
- DMB ISH // Ensure that accesses to shared resource have completed
STR w2, [x0, #260] // Set thread's ready flag
+ DMB ISH // Ensure that accesses to shared resource have completed
/* Return to the scheduler. */
- /* _tx_thread_schedule(); */
+ // _tx_thread_schedule();
- /* } */
+ // }
__tx_thread_idle_system_restore:
@@ -415,5 +335,5 @@ __tx_thread_idle_system_restore:
#endif
#endif
ERET // Return to scheduler
-/* } */
+// }
END
diff --git a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_protect.S b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_protect.S
index c5b30d2f..e6f6e62a 100644
--- a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_protect.S
@@ -45,15 +45,13 @@
SECTION `.text`:CODE:NOROOT(3)
CODE
- /* Include macros for modifying the wait list. */
- #include "tx_thread_smp_protection_wait_list_macros.h"
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A5x-SMP/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -85,6 +83,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
PUBLIC _tx_thread_smp_protect
@@ -97,295 +98,51 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- /* if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- { */
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- B.NE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- /* _tx_thread_smp_protection.tx_thread_smp_protect_count++; */
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- /* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- { */
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- B.NE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- /* if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- { */
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- B.NE _list_not_empty
-
- /* Try to get the lock. */
- /* if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- { */
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- B.NE _start_waiting // Did it fail?
-
- /* We got the lock! */
- /* _tx_thread_smp_protect_lock_got(); */
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- /* if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- { */
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- B.NE _start_waiting
-
- /* Is the lock still available? */
- /* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- { */
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- B.NE _start_waiting // No, protection not available
-
- /* Get the lock. */
- /* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1; */
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- B.NE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- /* _tx_thread_smp_protect_lock_got(); */
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- /* _tx_thread_smp_protect_remove_from_front_of_list(); */
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- /* _tx_thread_smp_protect_wait_counts[this_core]++; */
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- /* if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- { */
-
- CMP w4, #1
- B.NE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- /* _tx_thread_smp_protect_wait_list_add(this_core); */
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* } */
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ B.EQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- /* while (1)
- { */
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- /* if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- { */
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- B.EQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- /* } */
-
- /* Are we at the front of the list? */
- /* if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- { */
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- B.NE _did_not_get_lock
-
- /* Is the lock still available? */
- /* if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- { */
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- B.NE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- /* _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1; */
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- B.NE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- /* _tx_thread_smp_protect_lock_got(); */
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- /* _tx_thread_smp_protect_remove_from_front_of_list(); */
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- /* if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- { */
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- B.NE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- /* _tx_thread_smp_protect_wait_list_add(this_core); */
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- /* _tx_thread_smp_protect_wait_counts[this_core]++; */
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* } */
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- /* _tx_thread_smp_protect_wait_counts[this_core]--; */
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
END
diff --git a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_unprotect.S
index 0b99bc7e..0657a439 100644
--- a/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a5x_smp/iar/src/tx_thread_smp_unprotect.S
@@ -21,19 +21,6 @@
/**************************************************************************/
-/*
-#define TX_SOURCE_CODE
-#define TX_THREAD_SMP_SOURCE_CODE
-*/
-
-/* Include necessary system files. */
-
-/*
-#include "tx_api.h"
-#include "tx_thread.h"
-#include "tx_timer.h"
-*/
-
EXTERN _tx_thread_current_ptr
EXTERN _tx_thread_smp_protection
EXTERN _tx_thread_preempt_disable
@@ -46,7 +33,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_unprotect Cortex-A5x-SMP/IAR */
-/* 6.1.9 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -79,6 +66,9 @@
/* DATE NAME DESCRIPTION */
/* */
/* 10-15-2021 William E. Lamie Initial Version 6.1.9 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
PUBLIC _tx_thread_smp_unprotect
@@ -119,11 +109,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
B.NE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- B.NE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a65_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a65_smp/ac6/example_build/tx/.cproject
index 0d5e8570..c23b96cd 100644
--- a/ports_smp/cortex_a65_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a65_smp/ac6/example_build/tx/.cproject
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a65_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a65_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a65_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a65_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a65_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a65_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a65ae_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a65ae_smp/ac6/example_build/tx/.cproject
index f82c4a12..21738a63 100644
--- a/ports_smp/cortex_a65ae_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a65ae_smp/ac6/example_build/tx/.cproject
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a65ae_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a65ae_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a65ae_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a65ae_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a65ae_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a65ae_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a72_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a72_smp/ac6/example_build/sample_threadx/.cproject
index 188d23b9..acbfe8ce 100644
--- a/ports_smp/cortex_a72_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a72_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a72_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a72_smp/ac6/example_build/tx/.cproject
index e944fcf6..aa87f098 100644
--- a/ports_smp/cortex_a72_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a72_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a72_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a72_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a72_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a72_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a72_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a72_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a73_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a73_smp/ac6/example_build/sample_threadx/.cproject
index eb56d9c0..47d41421 100644
--- a/ports_smp/cortex_a73_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a73_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a73_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a73_smp/ac6/example_build/tx/.cproject
index dd9e40a1..cb27433f 100644
--- a/ports_smp/cortex_a73_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a73_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a73_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a73_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a73_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a73_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a73_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a73_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a75_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a75_smp/ac6/example_build/sample_threadx/.cproject
index 55342a4b..ee87537f 100644
--- a/ports_smp/cortex_a75_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a75_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a75_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a75_smp/ac6/example_build/tx/.cproject
index 75bd3a7c..53c6179f 100644
--- a/ports_smp/cortex_a75_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a75_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a75_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a75_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a75_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a75_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a75_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a75_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a76_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a76_smp/ac6/example_build/sample_threadx/.cproject
index 590d7745..c896ea80 100644
--- a/ports_smp/cortex_a76_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a76_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a76_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a76_smp/ac6/example_build/tx/.cproject
index 33721eb4..7e346fb2 100644
--- a/ports_smp/cortex_a76_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a76_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a76_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a76_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a76_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a76_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a76_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a76_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a76ae_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a76ae_smp/ac6/example_build/sample_threadx/.cproject
index 4a1565ae..b3292001 100644
--- a/ports_smp/cortex_a76ae_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a76ae_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a76ae_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a76ae_smp/ac6/example_build/tx/.cproject
index b662b32b..fb71ffbf 100644
--- a/ports_smp/cortex_a76ae_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a76ae_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a76ae_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a76ae_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a76ae_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a76ae_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a76ae_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a76ae_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a77_smp/ac6/example_build/sample_threadx/.cproject b/ports_smp/cortex_a77_smp/ac6/example_build/sample_threadx/.cproject
index 552e0f6e..4b7a0722 100644
--- a/ports_smp/cortex_a77_smp/ac6/example_build/sample_threadx/.cproject
+++ b/ports_smp/cortex_a77_smp/ac6/example_build/sample_threadx/.cproject
@@ -23,7 +23,7 @@
-
+
@@ -47,7 +47,7 @@
-
+
@@ -63,7 +63,7 @@
-
+
diff --git a/ports_smp/cortex_a77_smp/ac6/example_build/tx/.cproject b/ports_smp/cortex_a77_smp/ac6/example_build/tx/.cproject
index 7dd056a4..8e2dce0d 100644
--- a/ports_smp/cortex_a77_smp/ac6/example_build/tx/.cproject
+++ b/ports_smp/cortex_a77_smp/ac6/example_build/tx/.cproject
@@ -49,7 +49,7 @@
-
+
@@ -129,11 +129,7 @@
-
-
-
-
-
+
diff --git a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_context_restore.S b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_protect.S b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a77_smp/ac6/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/ports_smp/cortex_a77_smp/gnu/example_build/tx/.cproject b/ports_smp/cortex_a77_smp/gnu/example_build/tx/.cproject
index ec20edd2..3547a1d3 100644
--- a/ports_smp/cortex_a77_smp/gnu/example_build/tx/.cproject
+++ b/ports_smp/cortex_a77_smp/gnu/example_build/tx/.cproject
@@ -129,6 +129,12 @@
+
+
+
+
+
+
diff --git a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_context_restore.S b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_context_restore.S
index 4df471ac..21deb75d 100644
--- a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_context_restore.S
+++ b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_context_restore.S
@@ -21,9 +21,6 @@
/**************************************************************************/
-/* Include macros for modifying the wait list. */
-#include "tx_thread_smp_protection_wait_list_macros.h"
-
.text
.align 3
/**************************************************************************/
@@ -31,7 +28,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_context_restore ARMv8-A-SMP */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
// VOID _tx_thread_context_restore(VOID)
@@ -216,74 +216,6 @@ __tx_thread_no_preempt_restore:
// {
__tx_thread_preempt_restore:
- /* Was the thread being preempted waiting for the lock? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] != 0)
- // {
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- CMP w3, #0
- BEQ _nobody_waiting_for_lock // Is the core waiting for the lock?
-
- /* Do we not have the lock? This means the ISR never got the inter-core lock. */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_owned != this_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w8, w3 // Compare our core to the owning core
- BEQ _this_core_has_lock // Do we have the lock?
-
- /* We don't have the lock. This core should be in the list. Remove it. */
- // _tx_thread_smp_protect_wait_list_remove(this_core);
-
- _tx_thread_smp_protect_wait_list_remove // Call macro to remove core from the list
- B _nobody_waiting_for_lock // Leave
-
- // }
- // else
- // {
- /* We have the lock. This means the ISR got the inter-core lock, but
- never released it because it saw that there was someone waiting.
- Note this core is not in the list. */
-
-_this_core_has_lock:
-
- /* We're no longer waiting. Note that this should be zero since this happens during thread preemption. */
- // _tx_thread_smp_protect_wait_counts[core]--;
-
- LDR x2, =_tx_thread_smp_protect_wait_counts // Load waiting count list
- LDR w3, [x2, x8, LSL #2] // Load waiting value for this core
- SUB w3, w3, #1 // Decrement waiting value. Should be zero now
- STR w3, [x2, x8, LSL #2] // Store new waiting value
-
- /* Now release the inter-core lock. */
-
- /* Set protected core as invalid. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_core = 0xFFFFFFFF;
-
- LDR x2, =_tx_thread_smp_protection // Load address of protection structure
- MOV w3, #0xFFFFFFFF // Build invalid value
- STR w3, [x2, #4] // Mark the protected core as invalid
- DMB ISH // Ensure that accesses to shared resource have completed
-
- /* Release protection. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 0;
-
- MOV w3, #0 // Build release protection value
- STR w3, [x2, #0] // Release the protection
- DSB ISH // To ensure update of the protection occurs before other CPUs awake
-
- /* Wake up waiting processors. Note interrupts are already enabled. */
-
-#ifdef TX_ENABLE_WFE
- SEV // Send event to other CPUs
-#endif
-
- // }
- // }
-
-_nobody_waiting_for_lock:
LDR x4, [x0, #8] // Switch to thread stack pointer
MOV sp, x4 //
diff --git a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_protect.S b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_protect.S
index 9cde3e08..b65af329 100644
--- a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_protect.S
+++ b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_protect.S
@@ -32,7 +32,7 @@
/* FUNCTION RELEASE */
/* */
/* _tx_thread_smp_protect Cortex-A35-SMP/AC6 */
-/* 6.1.10 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -63,11 +63,14 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
-/* improved SMP code, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
+/* improved SMP code, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_protect
@@ -81,293 +84,49 @@ _tx_thread_smp_protect:
/* Pickup the CPU ID. */
- MRS x1, MPIDR_EL1 // Pickup the core ID
+ MRS x2, MPIDR_EL1 // Pickup the core ID
#ifdef TX_ARMV8_2
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
+ UBFX x7, x2, #16, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
+ UBFX x2, x2, #8, #8 // Isolate core ID
#else
#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
+ UBFX x7, x2, #8, #8 // Isolate cluster ID
#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
+ UBFX x2, x2, #0, #8 // Isolate core ID
#endif
#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
+ ADDS x2, x2, x7, LSL #2 // Calculate CPU ID
#endif
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR x2, =_tx_thread_smp_protection // Build address to protection structure
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w1, w3 // Is it not this core?
- BNE _protection_not_owned // No, the protection is not already owned
-
- /* We already have protection. */
-
- /* Increment the protection count. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_count++;
-
- LDR w3, [x2, #8] // Pickup ownership count
- ADD w3, w3, #1 // Increment ownership count
- STR w3, [x2, #8] // Store ownership count
- DMB ISH
-
- B _return
-
-_protection_not_owned:
-
- /* Is the lock available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Is the list empty? */
- // if (_tx_thread_smp_protect_wait_list_head == _tx_thread_smp_protect_wait_list_tail)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head
- LDR w3, [x3]
- LDR x4, =_tx_thread_smp_protect_wait_list_tail
- LDR w4, [x4]
- CMP w3, w4
- BNE _list_not_empty
-
- /* Try to get the lock. */
- // if (write_exclusive(&_tx_thread_smp_protection.tx_thread_smp_protect_in_force, 1) == SUCCESS)
- // {
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
-
- /* We got the lock! */
- // _tx_thread_smp_protect_lock_got();
-
- DMB ISH // Ensure write to protection finishes
- _tx_thread_smp_protect_lock_got // Call the lock got function
-
- B _return
-
-_list_not_empty:
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _start_waiting
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _start_waiting // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _start_waiting // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _return
-
-_start_waiting:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Increment wait count. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value
-
- /* Have we not added ourselves to the list yet? */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 1)
- // {
-
- CMP w4, #1
- BNE _already_in_list0 // Is this core already waiting?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- // }
-
-_already_in_list0:
-
- /* Restore interrupts. */
+ LDR x1, =_tx_thread_smp_protection // Build address to protection structure
+ LDR w3, [x1, #4] // Pickup the owning core
+ CMP w3, w2 // Is it this core?
+ BEQ _owned // Yes, the protection is already owned
+ LDAXR w4, [x1, #0] // Pickup the protection flag
+ CBZ w4, _get_protection // Yes, get the protection
MSR DAIF, x0 // Restore interrupts
ISB //
#ifdef TX_ENABLE_WFE
WFE // Go into standby
#endif
+ B _tx_thread_smp_protect // On waking, restart the protection attempt
- /* We do this until we have the lock. */
- // while (1)
- // {
-
-_try_to_get_lock:
-
- /* Disable interrupts so we don't get preempted. */
-
- MRS x0, DAIF // Pickup current interrupt posture
- MSR DAIFSet, 0x3 // Lockout interrupts
-
- /* Pickup the CPU ID. */
-
- MRS x1, MPIDR_EL1 // Pickup the core ID
-#ifdef TX_ARMV8_2
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #16, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #8, #8 // Isolate core ID
-#else
-#if TX_THREAD_SMP_CLUSTERS > 1
- UBFX x7, x1, #8, #8 // Isolate cluster ID
-#endif
- UBFX x1, x1, #0, #8 // Isolate core ID
-#endif
-#if TX_THREAD_SMP_CLUSTERS > 1
- ADDS x1, x1, x7, LSL #2 // Calculate CPU ID
-#endif
-
- /* Do we already have protection? */
- // if (this_core == _tx_thread_smp_protection.tx_thread_smp_protect_core)
- // {
-
- LDR w3, [x2, #4] // Pickup the owning core
- CMP w3, w1 // Is it this core?
- BEQ _got_lock_after_waiting // Yes, the protection is already owned. This means
- // an ISR preempted us and got protection
-
- // }
-
- /* Are we at the front of the list? */
- // if (this_core == _tx_thread_smp_protect_wait_list[_tx_thread_smp_protect_wait_list_head])
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_list_head // Get the address of the head
- LDR w3, [x3] // Get the value of the head
- LDR x4, =_tx_thread_smp_protect_wait_list // Get the address of the list
- LDR w4, [x4, x3, LSL #2] // Get the value at the head index
-
- CMP w1, w4
- BNE _did_not_get_lock
-
- /* Is the lock still available? */
- // if (_tx_thread_smp_protection.tx_thread_smp_protect_in_force == 0)
- // {
-
- LDAXR w3, [x2, #0] // Pickup the protection flag
- CMP w3, #0
- BNE _did_not_get_lock // No, protection not available
-
- /* Get the lock. */
- // _tx_thread_smp_protection.tx_thread_smp_protect_in_force = 1;
-
- MOV w3, #1 // Build lock value
- STXR w4, w3, [x2, #0] // Attempt to get the protection
- CMP w4, #0
- BNE _did_not_get_lock // Did it fail?
- DMB ISH //
-
- /* Got the lock. */
- // _tx_thread_smp_protect_lock_got();
-
- _tx_thread_smp_protect_lock_got
-
- /* Remove this core from the wait list. */
- // _tx_thread_smp_protect_remove_from_front_of_list();
-
- _tx_thread_smp_protect_remove_from_front_of_list
-
- B _got_lock_after_waiting
-
-_did_not_get_lock:
-
- /* For one reason or another, we didn't get the lock. */
-
- /* Were we removed from the list? This can happen if we're a thread
- and we got preempted. */
- // if (_tx_thread_smp_protect_wait_counts[this_core] == 0)
- // {
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- CMP w4, #0
- BNE _already_in_list1 // Is this core already in the list?
-
- /* Add ourselves to the list. */
- // _tx_thread_smp_protect_wait_list_add(this_core);
-
- _tx_thread_smp_protect_wait_list_add // Call macro to add ourselves to the list
-
- /* Our waiting count was also reset when we were preempted. Increment it again. */
- // _tx_thread_smp_protect_wait_counts[this_core]++;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load wait list counts
- LDR w4, [x3, x1, LSL #2] // Load waiting value for this core
- ADD w4, w4, #1 // Increment wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- // }
-
-_already_in_list1:
-
- /* Restore interrupts and try again. */
-
+_get_protection:
+ MOV x4, #1 // Build lock value
+ STXR w5, w4, [x1] // Attempt to get the protection
+ CBZ w5, _got_protection // Did it succeed? w5 = 0 means success!
MSR DAIF, x0 // Restore interrupts
- ISB //
-#ifdef TX_ENABLE_WFE
- WFE // Go into standby
-#endif
- B _try_to_get_lock // On waking, restart the protection attempt
-
-_got_lock_after_waiting:
-
- /* We're no longer waiting. */
- // _tx_thread_smp_protect_wait_counts[this_core]--;
-
- LDR x3, =_tx_thread_smp_protect_wait_counts // Load waiting list
- LDR w4, [x3, x1, LSL #2] // Load current wait value
- SUB w4, w4, #1 // Decrement wait value
- STR w4, [x3, x1, LSL #2] // Store new wait value value
-
- /* Restore registers and return. */
-
-_return:
-
+ B _tx_thread_smp_protect // Restart the protection attempt
+
+_got_protection:
+ DMB ISH //
+ STR w2, [x1, #4] // Save owning core
+_owned:
+ LDR w5, [x1, #8] // Pickup ownership count
+ ADD w5, w5, #1 // Increment ownership count
+ STR w5, [x1, #8] // Store ownership count
+ DMB ISH //
RET
diff --git a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_unprotect.S b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_unprotect.S
index a783cde6..f9d8692a 100644
--- a/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_unprotect.S
+++ b/ports_smp/cortex_a77_smp/gnu/src/tx_thread_smp_unprotect.S
@@ -60,10 +60,13 @@
/* */
/* DATE NAME DESCRIPTION */
/* */
-/* 09-30-2020 William E. Lamie Initial Version 6.1 */
-/* 01-31-2022 Andres Mlinar Updated comments, */
-/* added ARMv8.2-A support, */
+/* 09-30-2020 William E. Lamie Initial Version 6.1 */
+/* 01-31-2022 Andres Mlinar Updated comments, */
+/* added ARMv8.2-A support, */
/* resulting in version 6.1.10 */
+/* 04-25-2022 William E. Lamie Modified comments, removed */
+/* FIFO queueing, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
.global _tx_thread_smp_unprotect
@@ -105,11 +108,6 @@ _tx_thread_smp_unprotect:
CMP w3, #0 // Is the preempt disable flag set?
BNE _still_protected // Yes, skip the protection release
- LDR x2,=_tx_thread_smp_protect_wait_counts // Build build address of wait counts
- LDR w3, [x2, x1, LSL #2] // Pickup wait list value
- CMP w3, #0 // Are any entities on this core waiting?
- BNE _still_protected // Yes, skip the protection release
-
LDR x2,=_tx_thread_smp_protection // Build address of protection structure
MOV w3, #0xFFFFFFFF // Build invalid value
STR w3, [x2, #4] // Mark the protected core as invalid
diff --git a/utility/execution_profile_kit/tx_execution_profile.c b/utility/execution_profile_kit/tx_execution_profile.c
index 5ec6dd5e..7928a237 100644
--- a/utility/execution_profile_kit/tx_execution_profile.c
+++ b/utility/execution_profile_kit/tx_execution_profile.c
@@ -28,27 +28,27 @@
#include "tx_api.h"
#include "tx_execution_profile.h"
-/* Note to developers upgrading from ThreadX version 5: In ThreadX 5, the instruction was to
+/* Note to developers upgrading from ThreadX version 5: In ThreadX 5, the instruction was to
modify TX_THREAD_EXTENSION_3, and to define the symbol TX_ENABLE_EXECUTION_CHANGE_NOTIFY.
For ThreadX 6, user no long need to modify TX_THREAD_EXTENSION_3, and shall use the symbol
- TX_EXECUTION_PROFILE_ENABLE instead of TX_ENABLE_EXECUTION_CHANGE_NOTIFY.
+ TX_EXECUTION_PROFILE_ENABLE instead of TX_ENABLE_EXECUTION_CHANGE_NOTIFY.
For backward compatibiliy reasons, project upgraded from ThreadX 5 may still be able to use
- Execution Profile without changes to existing project, users are strongly recommended to
+ Execution Profile without changes to existing project, users are strongly recommended to
make the change. */
#if defined(TX_ENABLE_EXECUTION_CHANGE_NOTIFY) || defined(TX_EXECUTION_PROFILE_ENABLE)
-/* The thread execution profile kit is designed to track thread execution time
- based on the hardware timer defined by TX_EXECUTION_TIME_SOURCE and
- TX_EXECUTION_MAX_TIME_SOURCE below. When the thread's total time reaches
+/* The thread execution profile kit is designed to track thread execution time
+ based on the hardware timer defined by TX_EXECUTION_TIME_SOURCE and
+ TX_EXECUTION_MAX_TIME_SOURCE below. When the thread's total time reaches
the maximum value, it remains there until the time is reset to 0 via a call
- to tx_thread_execution_time_reset. There are several assumptions to the
+ to tx_thread_execution_time_reset. There are several assumptions to the
operation of this kit, as follows:
- 1. The TX_EXECUTION_TIME_SOURCE and TX_EXECUTION_MAX_TIME_SOURCE macros are
+ 1. The TX_EXECUTION_TIME_SOURCE and TX_EXECUTION_MAX_TIME_SOURCE macros are
defined to utilize a local hardware time source.
2. ThreadX 5.4 (or later) is being used, with the assembly code enabled to
@@ -59,9 +59,9 @@
VOID _tx_execution_isr_enter(void);
VOID _tx_execution_isr_exit(void);
- 3. The ThreadX library assembly code must be rebuilt with TX_EXECUTION_PROFILE_ENABLE so
- that these macros are expanded in the TX_THREAD structure and so the assembly code macros
- are enabled to call the execution profile routines.
+ 3. The ThreadX library assembly code must be rebuilt with TX_EXECUTION_PROFILE_ENABLE so
+ that these macros are expanded in the TX_THREAD structure and so the assembly code macros
+ are enabled to call the execution profile routines.
4. Add tx_execution_profile.c to the application build. */
@@ -86,7 +86,7 @@ EXECUTION_TIME _tx_execution_thread_time_total;
and _tx_thread_context_restore are tracked by this utility. */
EXECUTION_TIME _tx_execution_isr_time_total;
-EXECUTION_TIME_SOURCE_TYPE _tx_execution_isr_time_last_start;
+EXECUTION_TIME_SOURCE_TYPE _tx_execution_isr_time_last_start;
/* Define the system idle time gathering information. For idle time that exceeds the range of the timer
@@ -95,19 +95,67 @@ EXECUTION_TIME_SOURCE_TYPE _tx_execution_isr_time_last_start;
EXECUTION_TIME _tx_execution_idle_time_total;
EXECUTION_TIME_SOURCE_TYPE _tx_execution_idle_time_last_start;
-
+UINT _tx_execution_idle_active;
/* For Cortex-M targets, we need to keep track of nested interrupts internally. */
#ifdef TX_CORTEX_M_EPK
ULONG _tx_execution_isr_nest_counter = 0;
#endif
+
+/**************************************************************************/
+/* */
+/* FUNCTION RELEASE */
+/* */
+/* _tx_execution_initialize PORTABLE C */
+/* 6.1.11 */
+/* AUTHOR */
+/* */
+/* Scott Larson, Microsoft Corporation */
+/* */
+/* DESCRIPTION */
+/* */
+/* This function is called at initialization. */
+/* */
+/* INPUT */
+/* */
+/* None */
+/* */
+/* OUTPUT */
+/* */
+/* None */
+/* */
+/* CALLS */
+/* */
+/* None */
+/* */
+/* CALLED BY */
+/* */
+/*    ThreadX initialization (called once at system start)                */
+/* */
+/* RELEASE HISTORY */
+/* */
+/* DATE NAME DESCRIPTION */
+/* */
+/* 04-25-2022 Scott Larson Initial Version 6.1.11 */
+/* */
+/**************************************************************************/
+VOID _tx_execution_initialize(void)
+{
+ /* In idle mode until a thread is scheduled or ISR occurs. */
+ _tx_execution_idle_active = TX_TRUE;
+
+ /* Pickup the start of idle time. */
+ _tx_execution_idle_time_last_start = TX_EXECUTION_TIME_SOURCE;
+}
+
+
/**************************************************************************/
/* */
/* FUNCTION RELEASE */
/* */
/* _tx_execution_thread_enter PORTABLE C */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -137,6 +185,9 @@ ULONG _tx_execution_isr_nest_counter = 0;
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 William E. Lamie Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Modified comments and fixed */
+/* wrap-around calculation, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_execution_thread_enter(void)
@@ -164,7 +215,7 @@ EXECUTION_TIME new_total_time;
last_start_time = _tx_execution_idle_time_last_start;
/* Determine if idle time is being measured. */
- if (last_start_time)
+ if (_tx_execution_idle_active)
{
/* Determine how to calculate the difference. */
@@ -173,12 +224,12 @@ EXECUTION_TIME new_total_time;
/* Simply subtract. */
delta_time = (EXECUTION_TIME) (current_time - last_start_time);
- }
+ }
else
{
/* Timer wrapped, compute the delta assuming incrementing time counter. */
- delta_time = (EXECUTION_TIME) (current_time + (((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) - last_start_time));
+ delta_time = (EXECUTION_TIME) (current_time + ((((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) + 1) - last_start_time));
}
/* Pickup the total time. */
@@ -196,10 +247,10 @@ EXECUTION_TIME new_total_time;
}
/* Now store back the total idle time. */
- _tx_execution_idle_time_total = new_total_time;
+ _tx_execution_idle_time_total = new_total_time;
/* Disable the idle time measurement. */
- _tx_execution_idle_time_last_start = 0;
+ _tx_execution_idle_active = TX_FALSE;
}
}
@@ -209,7 +260,7 @@ EXECUTION_TIME new_total_time;
/* FUNCTION RELEASE */
/* */
/* _tx_execution_thread_exit PORTABLE C */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -239,6 +290,9 @@ EXECUTION_TIME new_total_time;
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 William E. Lamie Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Modified comments and fixed */
+/* wrap-around calculation, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_execution_thread_exit(void)
@@ -283,7 +337,7 @@ EXECUTION_TIME delta_time;
{
/* Timer wrapped, compute the delta assuming incrementing time counter. */
- delta_time = (EXECUTION_TIME) (current_time + (((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) - last_start_time));
+ delta_time = (EXECUTION_TIME) (current_time + ((((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) + 1) - last_start_time));
}
/* Pickup the total time. */
@@ -303,7 +357,7 @@ EXECUTION_TIME delta_time;
/* Store back the new total time. */
thread_ptr -> tx_thread_execution_time_total = new_total_time;
- /* Now accumulate this thread's execution time into the total thread execution time. */
+ /* Now accumulate this thread's execution time into the total thread execution time. */
new_total_time = _tx_execution_thread_time_total + delta_time;
/* Determine if a rollover on the total time is present. */
@@ -312,18 +366,18 @@ EXECUTION_TIME delta_time;
/* Rollover. Set the total time to max value. */
new_total_time = (EXECUTION_TIME) TX_EXECUTION_MAX_TIME_SOURCE;
- }
+ }
/* Store back the new total time. */
_tx_execution_thread_time_total = new_total_time;
}
-
+
/* Is the system now idle? */
if (_tx_thread_execute_ptr == TX_NULL)
{
-
/* Yes, idle system. Pickup the start of idle time. */
_tx_execution_idle_time_last_start = TX_EXECUTION_TIME_SOURCE;
+ _tx_execution_idle_active = TX_TRUE;
}
}
}
@@ -334,7 +388,7 @@ EXECUTION_TIME delta_time;
/* FUNCTION RELEASE */
/* */
/* _tx_execution_isr_enter PORTABLE C */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -364,6 +418,9 @@ EXECUTION_TIME delta_time;
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 William E. Lamie Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Modified comments and fixed */
+/* wrap-around calculation, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_execution_isr_enter(void)
@@ -381,7 +438,7 @@ EXECUTION_TIME delta_time;
_tx_execution_isr_nest_counter++;
#endif
- /* Determine if this is the first interrupt. Nested interrupts are all treated as
+ /* Determine if this is the first interrupt. Nested interrupts are all treated as
general interrupt processing. */
#ifdef TX_CORTEX_M_EPK
if ((TX_THREAD_GET_SYSTEM_STATE()) && (_tx_execution_isr_nest_counter == 1))
@@ -420,7 +477,7 @@ EXECUTION_TIME delta_time;
{
/* Timer wrapped, compute the delta assuming incrementing time counter. */
- delta_time = (EXECUTION_TIME) (current_time + (((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) - last_start_time));
+ delta_time = (EXECUTION_TIME) (current_time + ((((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) + 1) - last_start_time));
}
/* Pickup the total time. */
@@ -440,7 +497,7 @@ EXECUTION_TIME delta_time;
/* Store back the new total time. */
thread_ptr -> tx_thread_execution_time_total = new_total_time;
- /* Now accumulate this thread's execution time into the total thread execution time. */
+ /* Now accumulate this thread's execution time into the total thread execution time. */
new_total_time = _tx_execution_thread_time_total + delta_time;
/* Determine if a rollover on the total time is present. */
@@ -457,7 +514,7 @@ EXECUTION_TIME delta_time;
}
/* Has idle time started? */
- else if (_tx_execution_idle_time_last_start)
+ else if (_tx_execution_idle_active)
{
/* Pickup the last idle start time. */
@@ -474,7 +531,7 @@ EXECUTION_TIME delta_time;
{
/* Timer wrapped, compute the delta assuming incrementing time counter. */
- delta_time = (EXECUTION_TIME) (current_time + (((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) - last_start_time));
+ delta_time = (EXECUTION_TIME) (current_time + ((((EXECUTION_TIME_SOURCE_TYPE) TX_EXECUTION_MAX_TIME_SOURCE) + 1) - last_start_time));
}
/* Pickup the total time. */
@@ -495,7 +552,7 @@ EXECUTION_TIME delta_time;
_tx_execution_idle_time_total = new_total_time;
/* Disable the idle time measurement. */
- _tx_execution_idle_time_last_start = 0;
+ _tx_execution_idle_active = TX_FALSE;
}
/* Save the ISR start time. */
@@ -509,7 +566,7 @@ EXECUTION_TIME delta_time;
/* FUNCTION RELEASE */
/* */
/* _tx_execution_isr_exit PORTABLE C */
-/* 6.1.7 */
+/* 6.1.11 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -539,6 +596,9 @@ EXECUTION_TIME delta_time;
/* DATE NAME DESCRIPTION */
/* */
/* 06-02-2021 William E. Lamie Initial Version 6.1.7 */
+/* 04-25-2022 Scott Larson Modified comments and fixed */
+/* wrap-around calculation, */
+/* resulting in version 6.1.11 */
/* */
/**************************************************************************/
VOID _tx_execution_isr_exit(void)
@@ -620,6 +680,7 @@ EXECUTION_TIME delta_time;
/* Yes, idle system. Pickup the start of idle time. */
_tx_execution_idle_time_last_start = TX_EXECUTION_TIME_SOURCE;
+ _tx_execution_idle_active = TX_TRUE;
}
}
diff --git a/utility/execution_profile_kit/tx_execution_profile.h b/utility/execution_profile_kit/tx_execution_profile.h
index 09ca91a9..9973c22d 100644
--- a/utility/execution_profile_kit/tx_execution_profile.h
+++ b/utility/execution_profile_kit/tx_execution_profile.h
@@ -58,20 +58,21 @@ typedef unsigned long EXECUTION_TIME_SOURCE_TYPE;
/* Example for Cortex-M targets: */
#ifndef TX_EXECUTION_TIME_SOURCE
-#define TX_EXECUTION_TIME_SOURCE (EXECUTION_TIME_SOURCE_TYPE) *((ULONG *) 0xE0001004)
+#define TX_EXECUTION_TIME_SOURCE (EXECUTION_TIME_SOURCE_TYPE) *((volatile ULONG *) 0xE0001004)
#endif
#ifndef TX_EXECUTION_MAX_TIME_SOURCE
#define TX_EXECUTION_MAX_TIME_SOURCE 0xFFFFFFFF
#endif
/* For 64-bit time source, the constant would be: */
-/*#define TX_EXECUTION_TIME_SOURCE (EXECUTION_TIME_SOURCE_TYPE) *((unsigned long long *) 0xE0001004) */
+/*#define TX_EXECUTION_TIME_SOURCE (EXECUTION_TIME_SOURCE_TYPE) *((volatile unsigned long long *) 0xE0001004) */
/*#define TX_EXECUTION_MAX_TIME_SOURCE 0xFFFFFFFFFFFFFFFF */
/* Define APIs of the execution profile kit. */
struct TX_THREAD_STRUCT;
+VOID _tx_execution_initialize(void);
VOID _tx_execution_thread_enter(void);
VOID _tx_execution_thread_exit(void);
VOID _tx_execution_isr_enter(void);