Release 6.1.9

This commit is contained in:
Yuxin Zhou
2021-10-14 00:51:26 +00:00
parent 215df45d4b
commit 1af8404c54
1812 changed files with 60698 additions and 249862 deletions

File diff suppressed because it is too large.

View File

@@ -56,12 +56,12 @@
#define TX_BLOCK_POOL_ID ((ULONG) 0x424C4F43)
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the block memory pool
initialization function. */
#ifndef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -68,12 +68,12 @@
#endif
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the byte memory pool
initialization function. */
#ifndef TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -58,11 +58,11 @@
#define TX_EVENT_FLAGS_CLEAR_MASK ((UINT) 0x1)
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the event flag initialization
function. */
#ifndef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO

View File

@@ -67,15 +67,15 @@ VOID _tx_initialize_low_level(VOID);
/* Define the macro for adding additional port-specific global data. This macro is defined
as white space, unless defined by tx_port.h. */
#ifndef TX_PORT_SPECIFIC_DATA
#define TX_PORT_SPECIFIC_DATA
#endif
/* Define the macro for adding additional port-specific pre and post initialization processing.
These macros are defined as white space, unless defined by tx_port.h. */
#ifndef TX_PORT_SPECIFIC_PRE_INITIALIZATION
#define TX_PORT_SPECIFIC_PRE_INITIALIZATION
#endif
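If a port needs such a hook, tx_port.h can define the macro to run board setup code before the rest of initialization. A minimal sketch, assuming a hypothetical board function that is not part of this source:

/* Hypothetical tx_port.h override: run board-specific setup before ThreadX
   initialization proper. The function name is an assumption for illustration. */
extern void board_early_clock_init(void);
#define TX_PORT_SPECIFIC_PRE_INITIALIZATION    board_early_clock_init();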
@@ -102,9 +102,9 @@ VOID _tx_initialize_low_level(VOID);
#endif
/* Define the unused memory pointer. The value of the first available
memory address is placed in this variable in the low-level
initialization function. The content of this variable is passed
to the application's system definition function. */
INITIALIZE_DECLARE VOID *_tx_initialize_unused_memory;

View File

@@ -56,12 +56,12 @@
#define TX_MUTEX_ID ((ULONG) 0x4D555445)
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the mutex initialization
function. */
#ifndef TX_MUTEX_ENABLE_PERFORMANCE_INFO

View File

@@ -56,11 +56,11 @@
#define TX_QUEUE_ID ((ULONG) 0x51554555)
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the queue initialization
function. */
#ifndef TX_QUEUE_ENABLE_PERFORMANCE_INFO
@@ -85,7 +85,7 @@ VOID _tx_queue_initialize(VOID);
#endif
/* Define the message copy macro. Note that the source and destination
pointers must be modified since they are used subsequently. */
#ifndef TX_QUEUE_MESSAGE_COPY
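The default copy loop is cut off by the diff; as an illustration only, a port-style override satisfying the note above might look like the following sketch (the parameter roles, s as source ULONG pointer, d as destination ULONG pointer, z as message size in ULONGs, are assumptions rather than taken from the source):

/* Hypothetical override of TX_QUEUE_MESSAGE_COPY. Assumed roles: s = source ULONG
   pointer, d = destination ULONG pointer, z = message size in ULONGs. Both pointers
   are left pointing just past the copied message, as the note above requires. */
#define TX_QUEUE_MESSAGE_COPY(s, d, z)                                              \
    do {                                                                            \
        UINT _tx_copy_index;                                                        \
        for (_tx_copy_index = ((UINT) 0); _tx_copy_index < (z); _tx_copy_index++)   \
        {                                                                           \
            *(d)++ = *(s)++;                                                        \
        }                                                                           \
    } while (0)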

View File

@@ -56,10 +56,10 @@
#define TX_SEMAPHORE_ID ((ULONG) 0x53454D41)
/* Determine if in-line component initialization is supported by the
caller. */
#ifdef TX_INVOKE_INLINE_INITIALIZATION
/* Yes, in-line initialization is supported, remap the
semaphore initialization function. */
#ifndef TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO
#define _tx_semaphore_initialize() \
@@ -76,7 +76,7 @@
#endif
#define TX_SEMAPHORE_INIT
#else
/* No in-line initialization is supported, use standard
function call. */
VOID _tx_semaphore_initialize(VOID);
#endif

View File

@@ -26,7 +26,7 @@
/* COMPONENT DEFINITION RELEASE */
/* */
/* tx_thread.h PORTABLE C */
/* 6.1.2 */
/* 6.1.9 */
/* AUTHOR */
/* */
/* William E. Lamie, Microsoft Corporation */
@@ -48,6 +48,9 @@
/* moved TX_THREAD_GET_SYSTEM_ */
/* STATE to tx_api.h, */
/* resulting in version 6.1.2 */
/* 10-15-2021 Scott Larson Modified comment(s), improved */
/* stack check error handling, */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
@@ -90,32 +93,33 @@
/* Define state change macro that can be used by run-mode debug agents to keep track of thread
state changes. By default, it is mapped to white space. */
#ifndef TX_THREAD_STATE_CHANGE
#define TX_THREAD_STATE_CHANGE(a, b)
#endif
/* Define the macro to get the current thread pointer. This is particularly useful in SMP
versions of ThreadX to add additional processing. The default implementation is to simply
access the global current thread pointer directly. */
#ifndef TX_THREAD_GET_CURRENT
#define TX_THREAD_GET_CURRENT(a) (a) = _tx_thread_current_ptr;
#endif
/* Define the macro to set the current thread pointer. This is particularly useful in SMP
versions of ThreadX to add additional processing. The default implementation is to simply
access the global current thread pointer directly. */
#ifndef TX_THREAD_SET_CURRENT
#define TX_THREAD_SET_CURRENT(a) _tx_thread_current_ptr = (a);
#endif
/* Define the get system state macro. By default, it simply maps to the variable _tx_thread_system_state. */
/* This symbol is moved to tx_api.h. Therefore removed from this file.
#ifndef TX_THREAD_GET_SYSTEM_STATE
#define TX_THREAD_GET_SYSTEM_STATE() _tx_thread_system_state
#endif
@@ -144,10 +148,10 @@
#endif
/* Define the lowest bit set macro. Note that this may be overridden
by a port-specific definition if there are supporting assembly language
instructions in the architecture. */
#ifndef TX_LOWEST_SET_BIT_CALCULATE
#define TX_LOWEST_SET_BIT_CALCULATE(m, b) \
(b) = ((ULONG) 0); \
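/* The default search loop continues in the source (truncated by this diff). As an
   illustration only, a port with a count-trailing-zeros instruction could override
   the macro; a sketch assuming a GCC/Clang-style compiler:
   #define TX_LOWEST_SET_BIT_CALCULATE(m, b)    (b) = ((ULONG) __builtin_ctz((unsigned int) (m)));
*/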
@@ -212,9 +216,9 @@
#endif
/* Define the default thread stack checking. This can be overridden by
a particular port, which is necessary if the stack growth is from
low address to high address (the default logic is for stacks that
grow from high address to low address). */
#ifndef TX_THREAD_STACK_CHECK
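When stack checking is built in, an application typically registers a handler for detected overflows through the public tx_thread_stack_error_notify service; a minimal sketch (handler and function names are illustrative, not from the source):

#include "tx_api.h"

/* Illustrative handler: invoked by ThreadX when a thread stack error is detected. */
static VOID app_stack_error_handler(TX_THREAD *thread_ptr)
{
    (VOID) thread_ptr;    /* e.g. record thread_ptr -> tx_thread_name and halt */
}

/* Register the handler once, typically from tx_application_define. */
VOID app_register_stack_error_handler(VOID)
{
    (VOID) tx_thread_stack_error_notify(app_stack_error_handler);
}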
@@ -311,7 +315,7 @@ THREAD_DECLARE TX_THREAD * _tx_thread_current_ptr;
/* Define the variable that holds the next thread to execute. It is important
to remember that this is not necessarily equal to the current thread
pointer. */
THREAD_DECLARE TX_THREAD * _tx_thread_execute_ptr;
@@ -328,7 +332,7 @@ THREAD_DECLARE ULONG _tx_thread_created_count;
/* Define the current state variable. When this value is 0, a thread
is executing or the system is idle. Other values indicate that
interrupt or initialization processing is active. This variable is
initialized to TX_INITIALIZE_IN_PROGRESS to indicate initialization is
active. */
@@ -337,15 +341,15 @@ THREAD_DECLARE volatile ULONG _tx_thread_system_state;
/* Define the 32-bit priority bit-maps. There is one priority bit map for each
32 priority levels supported. If only 32 priorities are supported there is
only one bit map. Each bit within a priority bit map represents that one
or more threads at the associated thread priority are ready. */
THREAD_DECLARE ULONG _tx_thread_priority_maps[TX_MAX_PRIORITIES/32];
/* Define the priority map active bit map that specifies which of the previously
defined priority maps have something set. This is only necessary if more than
32 priorities are supported. */
#if TX_MAX_PRIORITIES > 32
@@ -355,17 +359,17 @@ THREAD_DECLARE ULONG _tx_thread_priority_map_active;
#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
/* Define the 32-bit preempt priority bit maps. There is one preempt bit map
for each 32 priority levels supported. If only 32 priorities are supported
there is only one bit map. Each set bit corresponds to a preempted priority
level that had preemption-threshold active to protect against preemption of a
range of relatively higher priority threads. */
THREAD_DECLARE ULONG _tx_thread_preempted_maps[TX_MAX_PRIORITIES/32];
/* Define the preempt map active bit map that specifies which of the previously
defined preempt maps have something set. This is only necessary if more than
32 priorities are supported. */
#if TX_MAX_PRIORITIES > 32
@@ -373,7 +377,7 @@ THREAD_DECLARE ULONG _tx_thread_preempted_map_active;
#endif
#endif
/* Define the variable that holds the highest priority group ready for
execution. It is important to note that this is not necessarily the same
as the priority of the thread pointed to by _tx_execute_thread. */
@@ -389,13 +393,13 @@ THREAD_DECLARE TX_THREAD * _tx_thread_priority_list[TX_MAX_PRIORITIES];
/* Define the global preempt disable variable. If this is non-zero, preemption is
disabled. It is used internally by ThreadX to prevent preemption of a thread in
the middle of a service that is resuming or suspending another thread. */
THREAD_DECLARE volatile UINT _tx_thread_preempt_disable;
/* Define the global function pointer for mutex cleanup on thread completion or
termination. This pointer is setup during mutex initialization. */
THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_ptr);
@@ -407,7 +411,7 @@ THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_pt
Bit(s) Meaning
31 TX_NOT_INTERRUPTABLE defined
30 TX_INLINE_THREAD_RESUME_SUSPEND defined
29-24 Priority groups 1 -> 32 priorities
2 -> 64 priorities
3 -> 96 priorities
@@ -437,10 +441,10 @@ THREAD_DECLARE VOID (*_tx_thread_mutex_release)(TX_THREAD *thread_pt
THREAD_DECLARE ULONG _tx_build_options;
#ifdef TX_ENABLE_STACK_CHECKING
#if defined(TX_ENABLE_STACK_CHECKING) || defined(TX_PORT_THREAD_STACK_ERROR_HANDLING)
/* Define the global function pointer for stack error handling. If a stack error is
detected and the application has registered a stack error handler, it will be
called via this function pointer. */
THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX_THREAD *thread_ptr);
@@ -455,20 +459,20 @@ THREAD_DECLARE VOID (*_tx_thread_application_stack_error_handler)(TX
THREAD_DECLARE ULONG _tx_thread_performance_resume_count;
/* Define the total number of thread suspensions. Each time a thread enters a
suspended state this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_suspend_count;
/* Define the total number of solicited thread preemptions. Each time a thread is
preempted by directly calling a ThreadX service, this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_solicited_preemption_count;
/* Define the total number of interrupt thread preemptions. Each time a thread is
preempted as a result of an ISR calling a ThreadX service, this variable is
incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_count;
@@ -480,45 +484,45 @@ THREAD_DECLARE ULONG _tx_thread_performance_interrupt_preemption_coun
THREAD_DECLARE ULONG _tx_thread_performance_priority_inversion_count;
/* Define the total number of time-slices. Each time a time-slice operation is
actually performed (another thread is setup for running) this variable is
incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_time_slice_count;
/* Define the total number of thread relinquish operations. Each time a thread
relinquish operation is actually performed (another thread is setup for running)
this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_relinquish_count;
/* Define the total number of thread timeouts. Each time a thread has a
timeout this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_timeout_count;
/* Define the total number of thread wait aborts. Each time a thread's suspension
is lifted by the tx_thread_wait_abort call this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_wait_abort_count;
/* Define the total number of idle system thread returns. Each time a thread returns to
an idle system (no other thread is ready to run) this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_idle_return_count;
/* Define the total number of non-idle system thread returns. Each time a thread returns to
a non-idle system (another thread is ready to run) this variable is incremented. */
THREAD_DECLARE ULONG _tx_thread_performance_non_idle_return_count;
/* Define the last TX_THREAD_EXECUTE_LOG_SIZE threads scheduled in ThreadX. This
is a circular list, where the index points to the oldest entry. */
THREAD_DECLARE ULONG _tx_thread_performance__execute_log_index;

View File

@@ -79,7 +79,7 @@ VOID _tx_timer_thread_entry(ULONG timer_thread_input);
#endif
/* Define the system clock value that is continually incremented by the
periodic timer interrupt processing. */
TIMER_DECLARE volatile ULONG _tx_timer_system_clock;
@@ -116,7 +116,7 @@ TIMER_DECLARE TX_TIMER_INTERNAL **_tx_timer_list_end;
TIMER_DECLARE TX_TIMER_INTERNAL **_tx_timer_current_ptr;
/* Define the timer expiration flag. This is used to indicate that a timer
has expired. */
TIMER_DECLARE UINT _tx_timer_expired;
@@ -195,8 +195,8 @@ TIMER_DECLARE ULONG _tx_timer_performance_expiration_count;
/* Define the total number of timer expiration adjustments. These are required
if the expiration time is greater than the size of the timer list. In such
cases, the timer is placed at the end of the list and then reactivated
as many times as necessary to finally achieve the resulting timeout. */
TIMER_DECLARE ULONG _tx_timer_performance__expiration_adjust_count;

View File

@@ -78,8 +78,8 @@
#endif
/* Define the default clock source for trace event entry time stamp. The following two items are port specific.
For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
source constants would be:
#define TX_TRACE_TIME_SOURCE *((ULONG *) 0x0a800024)
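The second of the two constants the comment refers to is the validity mask for the time stamp; for the 16-bit example above it would plausibly be the following (constant name assumed, value taken from the [Timer Valid Mask] description later in this header):

#define TX_TRACE_TIME_MASK    ((ULONG) 0x0000FFFF)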
@@ -101,8 +101,8 @@
/* ThreadX Trace Description. The ThreadX Trace feature is designed to capture
events in real-time in a circular event buffer. This buffer may be analyzed by other
tools. The high-level format of the Trace structure is:
[Trace Control Header ]
[Trace Object Registry - Entry 0 ]
@@ -115,23 +115,23 @@
*/
/* Trace Control Header. The Trace Control Header contains information that
defines the format of the Trace Object Registry as well as the location and
current entry of the Trace Buffer itself. The high-level format of the
Trace Control Header is:
Entry Size Description
[Trace ID] 4 This 4-byte field contains the ThreadX Trace
Identification. If the trace buffer is valid, the
contents are 0x54585442 (TXTB). Since it is written as
a 32-bit unsigned word, this value is also used to
determine if the event trace information is in
little or big endian format.
[Timer Valid Mask] 4 Mask of valid bits in the 32-bit time stamp. This
enables use of 32, 24, 16, or even 8-bit timers.
If the time source is 32-bits, the mask is
0xFFFFFFFF. If the time source is 16-bits, the
mask is 0x0000FFFF.
[Trace Base Address] 4 The base address for all trace pointers. Subtracting
the pointer and this address will yield the proper
@@ -143,7 +143,7 @@
[Trace Buffer Start Pointer] 4 Pointer to the start of the Trace Buffer Area
[Trace Buffer End Pointer] 4 Pointer to the end of the Trace Buffer Area
[Trace Buffer Current Pointer] 4 Pointer to the oldest entry in the Trace Buffer.
This entry will be overwritten on the next event and
incremented to the next event (wrapping to the top
if the buffer end pointer is exceeded).
[Reserved] 4 Reserved 4 bytes, should be 0xAAAAAAAA
@@ -173,7 +173,7 @@ typedef struct TX_TRACE_HEADER_STRUCT
} TX_TRACE_HEADER;
/* Trace Object Registry. The Trace Object Registry is used to map the object pointer in the trace buffer to
the application's name for the object (defined during object creation in ThreadX). */
#ifndef TX_TRACE_OBJECT_REGISTRY_NAME
@@ -181,7 +181,7 @@ typedef struct TX_TRACE_HEADER_STRUCT
#endif
/* Define the object name types as well as the contents of any additional parameters that might be useful in
trace analysis. */
#define TX_TRACE_OBJECT_TYPE_NOT_VALID ((UCHAR) 0) /* Object is not valid */
@@ -214,27 +214,27 @@ typedef struct TX_TRACE_OBJECT_ENTRY_STRUCT
Entry Size Description
[Thread Pointer] 4 This 4-byte field contains the pointer to the
ThreadX thread running that caused the event.
If this field is NULL, the entry hasn't been used
yet. If this field is 0xFFFFFFFF, the event occurred
from within an ISR. If this entry is 0xF0F0F0F0, the
event occurred during initialization.
[Thread Priority or 4 This 4-byte field contains the current thread pointer for interrupt
Current Thread events or the thread preemption-threshold/priority for thread events.
Preemption-Threshold/
Priority]
[Event ID] 4 This 4-byte field contains the Event ID of the event. A value of
0xFFFFFFFF indicates the event is invalid. All events are marked
as invalid during initialization.
[Time Stamp] 4 This 4-byte field contains the time stamp of the event.
[Information Field 1] 4 This 4-byte field contains the first 4-bytes of information
specific to the event.
[Information Field 2] 4 This 4-byte field contains the second 4-bytes of information
specific to the event.
[Information Field 3] 4 This 4-byte field contains the third 4-bytes of information
specific to the event.
[Information Field 4] 4 This 4-byte field contains the fourth 4-bytes of information
specific to the event.
*/
@@ -244,15 +244,15 @@ typedef struct TX_TRACE_OBJECT_ENTRY_STRUCT
/* Define ThreadX Trace Events, along with a brief description of the additional information fields,
where I1 -> Information Field 1, I2 -> Information Field 2, etc. */
/* Event numbers 0 through 4095 are reserved by Azure RTOS. Specific event assignments are:
ThreadX events: 1-199
FileX events: 200-299
NetX events: 300-599
USBX events: 600-999
User-defined event numbers start at 4096 and continue through 65535, as defined by the constants
TX_TRACE_USER_EVENT_START and TX_TRACE_USER_EVENT_END, respectively. User events should be based
on these constants in case the user event number assignment is changed in future releases. */
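For example, an application can derive its own event identifiers from TX_TRACE_USER_EVENT_START and record them with the tx_trace_user_event_insert service; a minimal sketch (event name and information fields are illustrative):

#include "tx_api.h"

#define APP_TRACE_EVENT_SENSOR_READ    (TX_TRACE_USER_EVENT_START + 1)

VOID app_trace_sensor_read(ULONG sensor_id, ULONG sample_value)
{
    /* The four information fields are application defined; unused ones are zero. */
    (VOID) tx_trace_user_event_insert(APP_TRACE_EVENT_SENSOR_READ, sensor_id, sample_value, 0, 0);
}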
/* Define the basic ThreadX thread scheduling events first. */
@@ -417,7 +417,7 @@ TRACE_DECLARE TX_TRACE_BUFFER_ENTRY *_tx_trace_buffer_end_ptr;
TRACE_DECLARE TX_TRACE_BUFFER_ENTRY *_tx_trace_buffer_current_ptr;
/* Define the trace event enable bits, where each bit represents a type of event that can be enabled
or disabled dynamically by the application. */
TRACE_DECLARE ULONG _tx_trace_event_enable_bits;
@@ -429,9 +429,9 @@ TRACE_DECLARE ULONG _tx_trace_event_enable_bits;
TRACE_DECLARE ULONG _tx_trace_simulated_time;
/* Define the function pointer used to call the application when the trace buffer wraps. If NULL,
the application has not registered a callback function. */
TRACE_DECLARE VOID (*_tx_trace_full_notify_function)(VOID *buffer);

View File

@@ -26,7 +26,7 @@
/* PORT SPECIFIC C INFORMATION RELEASE */
/* */
/* tx_user.h PORTABLE C */
/* 6.1.5 */
/* 6.1.9 */
/* */
/* AUTHOR */
/* */
@@ -51,6 +51,13 @@
/* added option to remove */
/* FileX pointer, */
/* resulting in version 6.1.5 */
/* 06-02-2021 Scott Larson Added options for multiple */
/* block pool search & delay, */
/* resulting in version 6.1.7 */
/* 10-15-2021 Yuxin Zhou Modified comment(s), added */
/* user-configurable symbol */
/* TX_TIMER_TICKS_PER_SECOND */
/* resulting in version 6.1.9 */
/* */
/**************************************************************************/
@@ -59,9 +66,9 @@
/* Define various build options for the ThreadX port. The application should either make changes
here by commenting or un-commenting the conditional compilation defines OR supply the defines
through the compiler's equivalent of the -D option.
For maximum speed, the following should be defined:
TX_MAX_PRIORITIES 32
@@ -73,9 +80,9 @@
TX_REACTIVATE_INLINE
TX_DISABLE_STACK_FILLING
TX_INLINE_THREAD_RESUME_SUSPEND
For minimum size, the following should be defined:
TX_MAX_PRIORITIES 32
TX_DISABLE_PREEMPTION_THRESHOLD
TX_DISABLE_REDUNDANT_CLEARING
@@ -83,12 +90,12 @@
TX_NO_FILEX_POINTER
TX_NOT_INTERRUPTABLE
TX_TIMER_PROCESS_IN_ISR
Of course, many of these defines reduce functionality and/or change the behavior of the
system in ways that may not be worth the trade-off. For example, the TX_TIMER_PROCESS_IN_ISR
results in faster and smaller code, however, it increases the amount of processing in the ISR.
In addition, some services that are available in timers are not available from ISRs and will
therefore return an error if this option is used. This may or may not be desirable for a
given application. */
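As an illustration of the mechanism (not a recommendation), a project building the library with TX_USER_FILE might un-comment a "minimum size" configuration along the lines listed above:

/* Illustrative minimum-size configuration, following the list above. */
#define TX_MAX_PRIORITIES                   32
#define TX_DISABLE_PREEMPTION_THRESHOLD
#define TX_DISABLE_REDUNDANT_CLEARING
#define TX_NO_FILEX_POINTER
#define TX_NOT_INTERRUPTABLE
#define TX_TIMER_PROCESS_IN_ISR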
@@ -103,19 +110,28 @@
#define TX_TIMER_THREAD_PRIORITY ????
*/
/* Define the common timer tick reference for use by other middleware components. The default
value is 10ms (i.e. 100 ticks, defined in tx_api.h), but may be replaced by a port-specific
version in tx_port.h or here.
Note: the actual hardware timer value may need to be changed (usually in tx_initialize_low_level). */
/*
#define TX_TIMER_TICKS_PER_SECOND ((ULONG) 100)
*/
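At the default rate of 100 ticks per second one tick is 10 ms; a small helper (hypothetical macro and function names) shows how application code might convert milliseconds into ticks for services such as tx_thread_sleep:

#include "tx_api.h"

/* Hypothetical helper: convert milliseconds to timer ticks, rounding down. */
#define APP_MS_TO_TICKS(ms)    (((ms) * TX_TIMER_TICKS_PER_SECOND) / ((ULONG) 1000))

VOID app_delay_50ms(VOID)
{
    /* Roughly 50 ms: 5 ticks at the default 100 ticks/second. */
    (VOID) tx_thread_sleep(APP_MS_TO_TICKS(50));
}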
/* Determine if there is a FileX pointer in the thread control block.
By default, the pointer is there for legacy/backwards compatibility.
The pointer must also be there for applications using FileX.
Define this to save space in the thread control block.
*/
/*
#define TX_NO_FILEX_POINTER
*/
/* Determine if timer expirations (application timers, timeouts, and tx_thread_sleep calls)
should be processed within a system timer thread or directly in the timer ISR.
By default, the timer thread is used. When the following is defined, the timer expiration
processing is done directly from the timer ISR, thereby eliminating the timer thread control
block, stack, and context switching to activate it. */
@@ -140,7 +156,7 @@
#define TX_DISABLE_STACK_FILLING
*/
/* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
define is negated, thereby forcing the stack fill which is necessary for the stack checking
@@ -150,7 +166,7 @@
#define TX_ENABLE_STACK_CHECKING
*/
/* Determine if preemption-threshold should be disabled. By default, preemption-threshold is
enabled. If the application does not use preemption-threshold, it may be disabled to reduce
code size and improve performance. */
@@ -158,7 +174,7 @@
#define TX_DISABLE_PREEMPTION_THRESHOLD
*/
/* Determine if global ThreadX variables should be cleared. If the compiler startup code clears
the .bss section prior to ThreadX running, the define can be used to eliminate unnecessary
clearing of ThreadX global variables. */
@@ -166,9 +182,9 @@
#define TX_DISABLE_REDUNDANT_CLEARING
*/
/* Determine if no timer processing is required. This option will help eliminate the timer
processing when not needed. The user will also have to comment out the call to
tx_timer_interrupt, which is typically made from assembly language in
tx_initialize_low_level. Note: if TX_NO_TIMER is used, the define TX_TIMER_PROCESS_IN_ISR
must also be used and tx_timer_initialize must be removed from ThreadX library. */
@@ -188,8 +204,8 @@
*/
/* Determine if the tx_thread_resume and tx_thread_suspend services should have their internal
code in-line. This results in a larger image, but improves the performance of the thread
resume and suspend services. */
/*
@@ -197,7 +213,7 @@
*/
/* Determine if the internal ThreadX code is non-interruptable. This results in smaller code
size and less processing overhead, but increases the interrupt lockout time. */
/*
@@ -205,8 +221,8 @@
*/
/* Determine if the trace event logging code should be enabled. This causes slight increases in
code size and overhead, but provides the ability to generate system trace information which
is available for viewing in TraceX. */
/*
@@ -270,5 +286,17 @@
#define TX_TIMER_ENABLE_PERFORMANCE_INFO
*/
/* Override options for byte pool searches of multiple blocks. */
/*
#define TX_BYTE_POOL_MULTIPLE_BLOCK_SEARCH 20
*/
/* Override options for byte pool search delay to avoid thrashing. */
/*
#define TX_BYTE_POOL_DELAY_VALUE 3
*/
#endif

View File

@@ -170,7 +170,7 @@ ULONG lower_tbu;
/* Save the pool's address in the block for when it is released! */
temp_ptr = TX_BLOCK_POOL_TO_UCHAR_POINTER_CONVERT(pool_ptr);
*next_block_ptr = temp_ptr;
#ifdef TX_ENABLE_EVENT_TRACE
/* Check that the event time stamp is unchanged. A different
@@ -182,7 +182,7 @@ ULONG lower_tbu;
/* Is the time stamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
@@ -200,7 +200,7 @@ ULONG lower_tbu;
/* Set status to success. */
status = TX_SUCCESS;
/* Restore interrupts. */
TX_RESTORE
}
@@ -229,7 +229,7 @@ ULONG lower_tbu;
{
/* Prepare for suspension of this thread. */
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
/* Increment the total suspensions counter. */
@@ -261,7 +261,7 @@ ULONG lower_tbu;
/* Pickup the number of suspended threads. */
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
/* Increment the number of suspended threads. */
(pool_ptr -> tx_block_pool_suspended_count)++;
@@ -322,11 +322,11 @@ ULONG lower_tbu;
allocate event. In that case, do nothing here. */
if (entry_ptr != TX_NULL)
{
/* Is the time-stamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
/* Disable interrupts to remove the suspended thread from the block pool. */
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_block_pool_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
/* Check for a NULL byte pool pointer. */
if (pool_ptr != TX_NULL)
{
/* Check for valid pool ID. */
if (pool_ptr -> tx_block_pool_id == TX_BLOCK_POOL_ID)
{
@@ -133,13 +133,13 @@ TX_THREAD *previous_thread;
suspended_count = pool_ptr -> tx_block_pool_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
pool_ptr -> tx_block_pool_suspension_list = TX_NULL;
}
@@ -157,7 +157,7 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (pool_ptr -> tx_block_pool_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
pool_ptr -> tx_block_pool_suspension_list = next_thread;
}
@@ -168,7 +168,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_BLOCK_MEMORY)
{
/* Timeout condition and the thread still suspended on the block pool.
Setup return error status and resume the thread. */
#ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -97,7 +97,7 @@ TX_BLOCK_POOL *previous_pool;
an ALIGN_TYPE (typically this is a 32-bit ULONG). This helps guarantee proper alignment. */
block_size = (((block_size + (sizeof(ALIGN_TYPE))) - ((ALIGN_TYPE) 1))/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
/* Round the pool size down to something that is evenly divisible by
an ALIGN_TYPE (typically this is a 32-bit ULONG). */
pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
@@ -106,7 +106,7 @@ TX_BLOCK_POOL *previous_pool;
pool_ptr -> tx_block_pool_start = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
pool_ptr -> tx_block_pool_size = pool_size;
pool_ptr -> tx_block_pool_block_size = (UINT) block_size;
/* Calculate the total number of blocks. */
total_blocks = pool_size/(block_size + (sizeof(UCHAR *)));
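As a concrete illustration of the arithmetic above, assuming a 32-bit port where sizeof(ALIGN_TYPE) and sizeof(UCHAR *) are both 4:

/* Worked example (assumed 32-bit port):
     requested block_size = 18   -> rounded up to 20 bytes
     requested pool_size  = 1022 -> rounded down to 1020 bytes
     total_blocks = 1020 / (20 + 4) = 42, i.e. 4 bytes of link overhead per block.  */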
@@ -145,7 +145,7 @@ TX_BLOCK_POOL *previous_pool;
/* Set the last block's forward pointer to NULL. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(block_ptr);
*block_link_ptr = TX_NULL;
/* Setup the starting pool address. */
pool_ptr -> tx_block_pool_available_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
@@ -180,7 +180,7 @@ TX_BLOCK_POOL *previous_pool;
pool_ptr -> tx_block_pool_created_previous = previous_pool;
pool_ptr -> tx_block_pool_created_next = next_pool;
}
/* Increment the created count. */
_tx_block_pool_created_count++;
@@ -208,7 +208,7 @@ TX_BLOCK_POOL *previous_pool;
/* Not enough memory for one block, return appropriate error. */
status = TX_SIZE_ERROR;
}
/* Return completion status. */
return(status);
}

View File

@@ -126,7 +126,7 @@ TX_BLOCK_POOL *previous_pool;
/* See if we have to update the created list head pointer. */
if (_tx_block_pool_created_ptr == pool_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_block_pool_created_ptr = next_pool;
}
@@ -148,14 +148,14 @@ TX_BLOCK_POOL *previous_pool;
on this block pool. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -77,8 +77,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_block_pool_info_get(TX_BLOCK_POOL *pool_ptr, CHAR **name, ULONG *available_blocks,
ULONG *total_blocks, TX_THREAD **first_suspended,
ULONG *suspended_count, TX_BLOCK_POOL **next_pool)
{
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the block pool. */
if (name != TX_NULL)
{
*name = pool_ptr -> tx_block_pool_name;
}
/* Retrieve the number of available blocks in the block pool. */
if (available_blocks != TX_NULL)
{
*available_blocks = (ULONG) pool_ptr -> tx_block_pool_available;
}
/* Retrieve the total number of blocks in the block pool. */
if (total_blocks != TX_NULL)
{
*total_blocks = (ULONG) pool_ptr -> tx_block_pool_total;
}
/* Retrieve the first thread suspended on this block pool. */
if (first_suspended != TX_NULL)
{
*first_suspended = pool_ptr -> tx_block_pool_suspension_list;
}
/* Retrieve the number of threads suspended on this block pool. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) pool_ptr -> tx_block_pool_suspended_count;
}
/* Retrieve the pointer to the next block pool created. */
if (next_pool != TX_NULL)
{
*next_pool = pool_ptr -> tx_block_pool_created_next;
}

View File

@@ -93,7 +93,7 @@ UINT status;
/* Determine if this is a legal request. */
if (pool_ptr == TX_NULL)
{
/* Block pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -101,13 +101,13 @@ UINT status;
/* Determine if the pool ID is invalid. */
else if (pool_ptr -> tx_block_pool_id != TX_BLOCK_POOL_ID)
{
/* Block pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
else
{
/* Disable interrupts. */
TX_DISABLE
@@ -123,28 +123,28 @@ UINT status;
/* Retrieve the number of allocations from this block pool. */
if (allocates != TX_NULL)
{
*allocates = pool_ptr -> tx_block_pool_performance_allocate_count;
}
/* Retrieve the number of blocks released to this block pool. */
if (releases != TX_NULL)
{
*releases = pool_ptr -> tx_block_pool_performance_release_count;
}
/* Retrieve the number of thread suspensions on this block pool. */
if (suspensions != TX_NULL)
{
*suspensions = pool_ptr -> tx_block_pool_performance_suspension_count;
}
/* Retrieve the number of thread timeouts on this block pool. */
if (timeouts != TX_NULL)
{
*timeouts = pool_ptr -> tx_block_pool_performance_timeout_count;
}
@@ -157,7 +157,7 @@ UINT status;
#else
UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (pool_ptr != TX_NULL)
{
@@ -191,7 +191,7 @@ UINT status;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -100,28 +100,28 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of block allocations. */
if (allocates != TX_NULL)
{
*allocates = _tx_block_pool_performance_allocate_count;
}
/* Retrieve the total number of blocks released. */
if (releases != TX_NULL)
{
*releases = _tx_block_pool_performance_release_count;
}
/* Retrieve the total number of block pool thread suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_block_pool_performance_suspension_count;
}
/* Retrieve the total number of block pool thread timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_block_pool_performance_timeout_count;
}
@@ -139,35 +139,35 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (allocates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (releases != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
/* Return completion status. */
return(status);
#endif

View File

@@ -130,7 +130,7 @@ UINT list_changed;
/* Remember the suspension count and head pointer. */
head_ptr = pool_ptr -> tx_block_pool_suspension_list;
/* Default the highest priority thread to the thread at the front of the list. */
priority_thread_ptr = head_ptr;
@@ -142,7 +142,7 @@ UINT list_changed;
/* Set the list changed flag to false. */
list_changed = TX_FALSE;
/* Search through the list to find the highest priority thread. */
do
{
@@ -160,33 +160,33 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != pool_ptr -> tx_block_pool_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != pool_ptr -> tx_block_pool_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
/* Move the thread pointer to the next thread. */
thread_ptr = thread_ptr -> tx_thread_suspended_next;
}
@@ -202,7 +202,7 @@ UINT list_changed;
/* Setup search pointer. */
thread_ptr = priority_thread_ptr -> tx_thread_suspended_next;
/* Reset the list changed flag. */
list_changed = TX_FALSE;
}
@@ -212,12 +212,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -90,7 +90,7 @@ TX_THREAD *previous_thread;
/* Disable interrupts to put this block back in the pool. */
TX_DISABLE
/* Pickup the pool pointer which is just previous to the starting
address of the block that the caller sees. */
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(block_ptr);
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, (sizeof(UCHAR *)));
@@ -121,7 +121,7 @@ TX_THREAD *previous_thread;
/* Decrement the number of threads suspended. */
(pool_ptr -> tx_block_pool_suspended_count)--;
/* Pickup the suspended count. */
suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
@@ -148,7 +148,7 @@ TX_THREAD *previous_thread;
next_thread -> tx_thread_suspended_previous = previous_thread;
previous_thread -> tx_thread_suspended_next = next_thread;
}
/* Prepare for resumption of the first thread. */
/* Clear cleanup routine to avoid timeout. */

View File

@@ -181,17 +181,17 @@ ULONG lower_tbu;
/* Determine if we are finished. */
if (work_ptr != TX_NULL)
{
/* Yes, we have found a block; the search is finished. */
finished = TX_TRUE;
}
else
{
/* No block was found, does this thread still own the pool? */
if (pool_ptr -> tx_byte_pool_owner == thread_ptr)
{
/* Yes, then we have looked through the entire pool and haven't found the memory. */
finished = TX_TRUE;
}
@@ -217,7 +217,7 @@ ULONG lower_tbu;
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
@@ -242,7 +242,7 @@ ULONG lower_tbu;
/* Restore interrupts. */
TX_RESTORE
/* Set the status to success. */
status = TX_SUCCESS;
}
@@ -304,7 +304,7 @@ ULONG lower_tbu;
/* Increment the suspension count. */
(pool_ptr -> tx_byte_pool_suspended_count)++;
/* Setup suspension list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -366,7 +366,7 @@ ULONG lower_tbu;
/* Is the timestamp the same? */
if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
{
/* Timestamp is the same, update the entry with the address. */
#ifdef TX_MISRA_ENABLE
entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*memory_ptr);
@@ -396,7 +396,7 @@ ULONG lower_tbu;
}
else
{
/* Restore interrupts. */
TX_RESTORE

View File

@@ -87,7 +87,7 @@ UINT suspended_count;
TX_THREAD *next_thread;
TX_THREAD *previous_thread;
#ifndef TX_NOT_INTERRUPTABLE
/* Disable interrupts to remove the suspended thread from the byte pool. */
@@ -96,7 +96,7 @@ TX_THREAD *previous_thread;
/* Determine if the cleanup is still required. */
if (thread_ptr -> tx_thread_suspend_cleanup == &(_tx_byte_pool_cleanup))
{
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
@@ -107,7 +107,7 @@ TX_THREAD *previous_thread;
/* Check for a NULL byte pool pointer. */
if (pool_ptr != TX_NULL)
{
/* Check for valid pool ID. */
if (pool_ptr -> tx_byte_pool_id == TX_BYTE_POOL_ID)
{
@@ -126,18 +126,18 @@ TX_THREAD *previous_thread;
/* Decrement the suspension count. */
pool_ptr -> tx_byte_pool_suspended_count--;
/* Pickup the suspended count. */
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
}
@@ -155,7 +155,7 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (pool_ptr -> tx_byte_pool_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
pool_ptr -> tx_byte_pool_suspension_list = next_thread;
}
@@ -166,7 +166,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_BYTE_MEMORY)
{
/* Timeout condition and the thread still suspended on the byte pool.
Setup return error status and resume the thread. */
#ifdef TX_BYTE_POOL_ENABLE_PERFORMANCE_INFO

View File

@@ -89,7 +89,7 @@ ALIGN_TYPE *free_ptr;
/* Initialize the byte pool control block to all zeros. */
TX_MEMSET(pool_ptr, 0, (sizeof(TX_BYTE_POOL)));
/* Round the pool size down to something that is evenly divisible by
an ULONG. */
pool_size = (pool_size/(sizeof(ALIGN_TYPE))) * (sizeof(ALIGN_TYPE));
@@ -104,17 +104,17 @@ ALIGN_TYPE *free_ptr;
pool_ptr -> tx_byte_pool_list = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
pool_ptr -> tx_byte_pool_search = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
/* Initially, the pool will have two blocks. One large block at the
beginning that is available and a small allocated block at the end
of the pool that is there just for the algorithm. Be sure to count
the available block's header in the available bytes count. */
pool_ptr -> tx_byte_pool_available = pool_size - ((sizeof(VOID *)) + (sizeof(ALIGN_TYPE)));
pool_ptr -> tx_byte_pool_fragments = ((UINT) 2);
/* Each block contains a "next" pointer that points to the next block in the pool followed by a ALIGN_TYPE
field that contains either the constant TX_BYTE_BLOCK_FREE (if the block is free) or a pointer to the
owning pool (if the block is allocated). */
/* Calculate the end of the pool's memory area. */
block_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(pool_start);
block_ptr = TX_UCHAR_POINTER_ADD(block_ptr, pool_size);
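The two-block layout described above can be pictured as follows, assuming a 32-bit port where sizeof(VOID *) and sizeof(ALIGN_TYPE) are both 4 (a sketch, not from the source):

/* Initial byte pool layout:

   pool_start                                                      pool_start + pool_size
   | next ptr | TX_BYTE_BLOCK_FREE |  ... available bytes ...   | next ptr | pool_ptr |
   +----------+--------------------+----------------------------+----------+----------+
     4 bytes        4 bytes           large free block             small allocated
                                                                    end-of-pool block  */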
@@ -177,7 +177,7 @@ ALIGN_TYPE *free_ptr;
/* Increment the number of created byte pools. */
_tx_byte_pool_created_count++;
/* Optional byte pool create extended processing. */
TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)

View File

@@ -110,7 +110,7 @@ TX_BYTE_POOL *previous_pool;
/* Decrement the number of byte pools created. */
_tx_byte_pool_created_count--;
/* See if the byte pool is the only one on the list. */
if (_tx_byte_pool_created_count == TX_EMPTY)
{
@@ -130,7 +130,7 @@ TX_BYTE_POOL *previous_pool;
/* See if we have to update the created list head pointer. */
if (_tx_byte_pool_created_ptr == pool_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_byte_pool_created_ptr = next_pool;
}
@@ -144,7 +144,7 @@ TX_BYTE_POOL *previous_pool;
pool_ptr -> tx_byte_pool_suspension_list = TX_NULL;
suspended_count = pool_ptr -> tx_byte_pool_suspended_count;
pool_ptr -> tx_byte_pool_suspended_count = TX_NO_SUSPENSIONS;
/* Restore interrupts. */
TX_RESTORE
@@ -152,14 +152,14 @@ TX_BYTE_POOL *previous_pool;
on this byte pool. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;

View File

@@ -77,8 +77,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_byte_pool_info_get(TX_BYTE_POOL *pool_ptr, CHAR **name, ULONG *available_bytes,
ULONG *fragments, TX_THREAD **first_suspended,
ULONG *suspended_count, TX_BYTE_POOL **next_pool)
{
@@ -100,42 +100,42 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the byte pool. */
if (name != TX_NULL)
{
*name = pool_ptr -> tx_byte_pool_name;
}
/* Retrieve the number of available bytes in the byte pool. */
if (available_bytes != TX_NULL)
{
*available_bytes = pool_ptr -> tx_byte_pool_available;
}
/* Retrieve the total number of fragments in the byte pool. */
if (fragments != TX_NULL)
{
*fragments = (ULONG) pool_ptr -> tx_byte_pool_fragments;
}
/* Retrieve the first thread suspended on this byte pool. */
if (first_suspended != TX_NULL)
{
*first_suspended = pool_ptr -> tx_byte_pool_suspension_list;
}
/* Retrieve the number of threads suspended on this byte pool. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) pool_ptr -> tx_byte_pool_suspended_count;
}
/* Retrieve the pointer to the next byte pool created. */
if (next_pool != TX_NULL)
{
*next_pool = pool_ptr -> tx_byte_pool_created_next;
}

View File

@@ -102,15 +102,15 @@ UINT status;
/* Determine if this is a legal request. */
if (pool_ptr == TX_NULL)
{
/* Byte pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the pool ID is invalid. */
else if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
{
/* Byte pool pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -132,7 +132,7 @@ UINT status;
/* Retrieve the number of allocates on this byte pool. */
if (allocates != TX_NULL)
{
*allocates = pool_ptr -> tx_byte_pool_performance_allocate_count;
}
@@ -146,35 +146,35 @@ UINT status;
/* Retrieve the number of fragments searched in this byte pool. */
if (fragments_searched != TX_NULL)
{
*fragments_searched = pool_ptr -> tx_byte_pool_performance_search_count;
}
/* Retrieve the number of fragments merged on this byte pool. */
if (merges != TX_NULL)
{
*merges = pool_ptr -> tx_byte_pool_performance_merge_count;
}
/* Retrieve the number of fragment splits on this byte pool. */
if (splits != TX_NULL)
{
*splits = pool_ptr -> tx_byte_pool_performance_split_count;
}
/* Retrieve the number of suspensions on this byte pool. */
if (suspensions != TX_NULL)
{
*suspensions = pool_ptr -> tx_byte_pool_performance_suspension_count;
}
/* Retrieve the number of timeouts on this byte pool. */
if (timeouts != TX_NULL)
{
*timeouts = pool_ptr -> tx_byte_pool_performance_timeout_count;
}
@@ -184,7 +184,7 @@ UINT status;
/* Return completion status. */
status = TX_SUCCESS;
}
/* Return completion status. */
return(status);
#else
@@ -195,55 +195,55 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (pool_ptr != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (allocates != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (releases != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (fragments_searched != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (merges != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (splits != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -109,58 +109,58 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of byte pool allocates. */
if (allocates != TX_NULL)
{
*allocates = _tx_byte_pool_performance_allocate_count;
}
/* Retrieve the total number of byte pool releases. */
if (releases != TX_NULL)
{
*releases = _tx_byte_pool_performance_release_count;
}
/* Retrieve the total number of byte pool fragments searched. */
if (fragments_searched != TX_NULL)
{
*fragments_searched = _tx_byte_pool_performance_search_count;
}
/* Retrieve the total number of byte pool fragments merged. */
if (merges != TX_NULL)
{
*merges = _tx_byte_pool_performance_merge_count;
}
/* Retrieve the total number of byte pool fragment splits. */
if (splits != TX_NULL)
{
*splits = _tx_byte_pool_performance_split_count;
}
/* Retrieve the total number of byte pool suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_byte_pool_performance_suspension_count;
}
/* Retrieve the total number of byte pool timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_byte_pool_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -215,7 +215,7 @@ UINT status;
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
/* Return completion status. */
return(status);
#endif

View File

@@ -161,19 +161,19 @@ UINT list_changed;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != pool_ptr -> tx_byte_pool_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != pool_ptr -> tx_byte_pool_suspended_count)
{
@@ -212,12 +212,12 @@ UINT list_changed;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

View File

@@ -98,7 +98,7 @@ UCHAR **suspend_info_ptr;
/* Default to successful status. */
status = TX_SUCCESS;
/* Set the pool pointer to NULL. */
pool_ptr = TX_NULL;
@@ -109,7 +109,7 @@ UCHAR **suspend_info_ptr;
work_ptr = TX_VOID_TO_UCHAR_POINTER_CONVERT(memory_ptr);
if (work_ptr != TX_NULL)
{
/* Back off the memory pointer to pickup its header. */
work_ptr = TX_UCHAR_POINTER_SUB(work_ptr, ((sizeof(UCHAR *)) + (sizeof(ALIGN_TYPE))));
@@ -127,7 +127,7 @@ UCHAR **suspend_info_ptr;
/* See if we have a valid pool pointer. */
if (pool_ptr == TX_NULL)
{
/* Return pointer error. */
status = TX_PTR_ERROR;
}
@@ -137,10 +137,10 @@ UCHAR **suspend_info_ptr;
/* See if we have a valid pool. */
if (pool_ptr -> tx_byte_pool_id != TX_BYTE_POOL_ID)
{
/* Return pointer error. */
status = TX_PTR_ERROR;
/* Reset the pool pointer to NULL. */
pool_ptr = TX_NULL;
}
@@ -163,13 +163,13 @@ UCHAR **suspend_info_ptr;
/* Determine if the pointer is valid. */
if (pool_ptr == TX_NULL)
{
/* Restore interrupts. */
TX_RESTORE
}
else
{
/* At this point, we know that the pointer is valid. */
/* Pickup thread pointer. */
@@ -201,7 +201,7 @@ UCHAR **suspend_info_ptr;
/* Update the number of available bytes in the pool. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
next_block_ptr = *block_link_ptr;
pool_ptr -> tx_byte_pool_available =
pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
/* Determine if the free block is prior to current search pointer. */
@@ -215,8 +215,8 @@ UCHAR **suspend_info_ptr;
/* Determine if there are threads suspended on this byte pool. */
if (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
{
/* Now examine the suspension list to find threads waiting for
memory. Maybe it is now available! */
while (pool_ptr -> tx_byte_pool_suspended_count != TX_NO_SUSPENSIONS)
{
@@ -245,7 +245,7 @@ UCHAR **suspend_info_ptr;
/* If there is not enough memory, break this loop! */
if (work_ptr == TX_NULL)
{
/* Break out of the loop. */
break;
}
@@ -257,7 +257,7 @@ UCHAR **suspend_info_ptr;
/* Also, make sure the memory size is the same. */
if (susp_thread_ptr -> tx_thread_suspend_info == memory_size)
{
/* Remove the suspended thread from the list. */
/* Decrement the number of threads suspended. */
@@ -302,7 +302,7 @@ UCHAR **suspend_info_ptr;
/* Clear the memory pointer to indicate that it was given to the suspended thread. */
work_ptr = TX_NULL;
/* Put return status into the thread control block. */
susp_thread_ptr -> tx_thread_suspend_status = TX_SUCCESS;
@@ -328,11 +328,11 @@ UCHAR **suspend_info_ptr;
TX_DISABLE
}
}
/* Determine if the memory was given to the suspended thread. */
if (work_ptr != TX_NULL)
{
/* No, it wasn't given to the suspended thread. */
/* Put the memory back on the available list since this thread is no longer
@@ -345,7 +345,7 @@ UCHAR **suspend_info_ptr;
/* Update the number of available bytes in the pool. */
block_link_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
next_block_ptr = *block_link_ptr;
pool_ptr -> tx_byte_pool_available =
pool_ptr -> tx_byte_pool_available + TX_UCHAR_POINTER_DIF(next_block_ptr, work_ptr);
/* Determine if the current pointer is before the search pointer. */
@@ -357,7 +357,7 @@ UCHAR **suspend_info_ptr;
}
}
}
/* Restore interrupts. */
TX_RESTORE
@@ -366,7 +366,7 @@ UCHAR **suspend_info_ptr;
}
else
{
/* No threads suspended, restore interrupts. */
TX_RESTORE
}
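
The release path above returns the block to the pool and, when possible, hands it straight to a thread suspended in tx_byte_allocate. A minimal allocate/release sketch, assuming the standard tx_api.h services; the pool object, size, and timeout are hypothetical:

#include "tx_api.h"

extern TX_BYTE_POOL app_pool;    /* hypothetical pool created in tx_application_define */

VOID use_pool_memory(VOID)
{
VOID *buffer;

    /* Block for up to 100 ticks waiting for 256 bytes. */
    if (tx_byte_allocate(&app_pool, &buffer, 256, 100) == TX_SUCCESS)
    {
        /* ... use buffer ... */

        /* Returning the block may satisfy and resume a thread suspended on the pool. */
        tx_byte_release(buffer);
    }
}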

View File

@@ -104,11 +104,11 @@ TX_THREAD *previous_thread;
/* Setup pointer to event flags control block. */
group_ptr = TX_VOID_TO_EVENT_FLAGS_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
/* Check for a NULL event flags control block pointer. */
if (group_ptr != TX_NULL)
{
/* Is the group pointer ID valid? */
if (group_ptr -> tx_event_flags_group_id == TX_EVENT_FLAGS_ID)
{
@@ -133,9 +133,9 @@ TX_THREAD *previous_thread;
/* Pickup the suspension head. */
suspension_head = group_ptr -> tx_event_flags_group_suspension_list;
/* Determine if the cleanup is being done while a set operation was interrupted. If the
suspended count is non-zero and the suspension head is NULL, the list is being processed
and cannot be touched from here. The suspension list removal will instead take place
inside the event flag set code. */
if (suspension_head != TX_NULL)
{
@@ -144,7 +144,7 @@ TX_THREAD *previous_thread;
/* Decrement the local suspension count. */
suspended_count--;
/* Store the updated suspended count. */
group_ptr -> tx_event_flags_group_suspended_count = suspended_count;
@@ -153,7 +153,7 @@ TX_THREAD *previous_thread;
{
/* Yes, the only suspended thread. */
/* Update the head pointer. */
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
}
@@ -161,17 +161,17 @@ TX_THREAD *previous_thread;
{
/* At least one more thread is on the same suspension list. */
/* Update the links of the adjacent threads. */
next_thread = thread_ptr -> tx_thread_suspended_next;
previous_thread = thread_ptr -> tx_thread_suspended_previous;
next_thread -> tx_thread_suspended_previous = previous_thread;
previous_thread -> tx_thread_suspended_next = next_thread;
/* Determine if we need to update the head pointer. */
if (suspension_head == thread_ptr)
{
/* Update the list head pointer. */
group_ptr -> tx_event_flags_group_suspension_list = next_thread;
}
@@ -179,7 +179,7 @@ TX_THREAD *previous_thread;
}
else
{
/* In this case, the search pointer in an interrupted event flag set must be reset. */
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;
}
@@ -189,7 +189,7 @@ TX_THREAD *previous_thread;
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
{
/* Timeout condition and the thread still suspended on the event flags group.
Setup return error status and resume the thread. */
#ifdef TX_EVENT_FLAGS_ENABLE_PERFORMANCE_INFO
@@ -216,8 +216,8 @@ TX_THREAD *previous_thread;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread! Check for preemption even though we are executing
from the system timer thread right now which normally executes at the
highest priority. */
_tx_thread_system_resume(thread_ptr);

View File

@@ -86,7 +86,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Setup the basic event flags group fields. */
group_ptr -> tx_event_flags_group_name = name_ptr;
/* Disable interrupts to put the event flags group on the created list. */
TX_DISABLE
@@ -121,7 +121,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Increment the number of created event flag groups. */
_tx_event_flags_created_count++;
/* Optional event flag group create extended processing. */
TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
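
Creating a group from application code only needs the control block and a name; the create call links the group onto the created list shown above. A sketch assuming the standard tx_event_flags_create prototype; the group object and name are hypothetical:

#include "tx_api.h"

TX_EVENT_FLAGS_GROUP sensor_events;    /* hypothetical application group */

VOID create_sensor_events(VOID)
{
    /* Create the group; all 32 event flags start cleared. */
    if (tx_event_flags_create(&sensor_events, "sensor events") != TX_SUCCESS)
    {
        /* Handle the create failure. */
    }
}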

View File

@@ -106,7 +106,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Decrement the number of created event flag groups. */
_tx_event_flags_created_count--;
/* See if this group is the only one on the list. */
if (_tx_event_flags_created_count == TX_EMPTY)
{
@@ -126,7 +126,7 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* See if we have to update the created list head pointer. */
if (_tx_event_flags_created_ptr == group_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_event_flags_created_ptr = next_group;
}
@@ -144,18 +144,18 @@ TX_EVENT_FLAGS_GROUP *previous_group;
/* Restore interrupts. */
TX_RESTORE
/* Walk through the event flag suspension list to resume any and all threads
suspended on this group. */
while (suspended_count != TX_NO_SUSPENSIONS)
{
/* Decrement the number of suspended threads. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
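
Deleting a group resumes every waiter, which then sees a TX_DELETED return status; that is what the resume loop above implements. A caller-side sketch, assuming the standard tx_event_flags_delete prototype and the hypothetical group from the create example:

#include "tx_api.h"

extern TX_EVENT_FLAGS_GROUP sensor_events;    /* hypothetical group */

VOID shutdown_sensor_events(VOID)
{
    /* Any thread suspended in tx_event_flags_get on this group is resumed
       and receives TX_DELETED as its return status. */
    tx_event_flags_delete(&sensor_events);
}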

View File

@@ -130,16 +130,16 @@ UINT interrupted_set_request;
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -150,7 +150,7 @@ UINT interrupted_set_request;
/* OR request is present. Simply AND the requested flags with the current flags to see if any are present. */
flags_satisfied = (current_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -164,7 +164,7 @@ UINT interrupted_set_request;
/* Determine whether or not clearing needs to take place. */
if (clear_request == TX_TRUE)
{
/* Yes, clear the flags that satisfied this request. */
group_ptr -> tx_event_flags_group_current =
group_ptr -> tx_event_flags_group_current & (~requested_flags);
@@ -190,16 +190,16 @@ UINT interrupted_set_request;
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -211,7 +211,7 @@ UINT interrupted_set_request;
to see if any are present. */
flags_satisfied = (current_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -235,7 +235,7 @@ UINT interrupted_set_request;
set request. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
{
@@ -252,7 +252,7 @@ UINT interrupted_set_request;
event clearing until the set operation is complete. */
/* Remember the events to clear. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear | requested_flags;
}
else
@@ -279,7 +279,7 @@ UINT interrupted_set_request;
/* Determine if the preempt disable flag is non-zero. */
if (_tx_thread_preempt_disable != ((UINT) 0))
{
/* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
status = TX_NO_EVENTS;
}
@@ -296,7 +296,7 @@ UINT interrupted_set_request;
/* Increment the number of event flags suspensions on this semaphore. */
group_ptr -> tx_event_flags_group___performance_suspension_count++;
#endif
/* Pickup thread pointer. */
TX_THREAD_GET_CURRENT(thread_ptr)
@@ -325,7 +325,7 @@ UINT interrupted_set_request;
/* Pickup the suspended count. */
suspended_count = group_ptr -> tx_event_flags_group_suspended_count;
/* Setup suspension list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -350,7 +350,7 @@ UINT interrupted_set_request;
/* Increment the number of threads suspended. */
group_ptr -> tx_event_flags_group_suspended_count++;
/* Set the state to suspended. */
thread_ptr -> tx_thread_state = TX_EVENT_FLAG;
@@ -377,10 +377,10 @@ UINT interrupted_set_request;
/* Call actual thread suspension routine. */
_tx_thread_system_suspend(thread_ptr);
/* Disable interrupts. */
TX_DISABLE
/* Return the completion status. */
status = thread_ptr -> tx_thread_suspend_status;
#endif
@@ -388,7 +388,7 @@ UINT interrupted_set_request;
}
else
{
/* Immediate return, return error completion. */
status = TX_NO_EVENTS;
}
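
From the application's side, the AND/OR and clear logic above is selected with the get option. A sketch waiting on two hypothetical flags with consume-on-satisfaction semantics, assuming the standard tx_event_flags_get prototype:

#include "tx_api.h"

#define FLAG_RX_DONE   ((ULONG) 0x01)    /* hypothetical application flags */
#define FLAG_TX_DONE   ((ULONG) 0x02)

extern TX_EVENT_FLAGS_GROUP sensor_events;    /* hypothetical group */

VOID wait_for_both_transfers(VOID)
{
ULONG actual;
UINT  status;

    /* Wait until both flags are set; TX_AND_CLEAR also clears them in the
       group once the request is satisfied. */
    status = tx_event_flags_get(&sensor_events, FLAG_RX_DONE | FLAG_TX_DONE,
                                TX_AND_CLEAR, &actual, TX_WAIT_FOREVER);

    if (status == TX_SUCCESS)
    {
        /* actual holds the flag state that satisfied the request. */
        /* A finite wait option (or TX_NO_WAIT) would instead return TX_NO_EVENTS on expiration. */
    }
}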

View File

@@ -79,8 +79,8 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_event_flags_info_get(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR **name, ULONG *current_flags,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_EVENT_FLAGS_GROUP **next_group)
{
@@ -102,7 +102,7 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the event flag group. */
if (name != TX_NULL)
{
*name = group_ptr -> tx_event_flags_group_name;
}
@@ -111,31 +111,31 @@ TX_INTERRUPT_SAVE_AREA
{
/* Pickup the current flags and apply delayed clearing. */
*current_flags = group_ptr -> tx_event_flags_group_current &
~group_ptr -> tx_event_flags_group_delayed_clear;
}
/* Retrieve the first thread suspended on this event flag group. */
if (first_suspended != TX_NULL)
{
*first_suspended = group_ptr -> tx_event_flags_group_suspension_list;
}
/* Retrieve the number of threads suspended on this event flag group. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) group_ptr -> tx_event_flags_group_suspended_count;
}
/* Retrieve the pointer to the next event flag group created. */
if (next_group != TX_NULL)
{
*next_group = group_ptr -> tx_event_flags_group_created_next;
}
/* Restore interrupts. */
TX_RESTORE
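
The info service above snapshots the group under interrupt protection, with delayed clears already factored out of the reported flags. A caller-side sketch, assuming the standard tx_event_flags_info_get prototype and the hypothetical sensor_events group:

#include "tx_api.h"

extern TX_EVENT_FLAGS_GROUP sensor_events;    /* hypothetical group */

VOID inspect_sensor_events(VOID)
{
CHAR      *name;
ULONG      current_flags;
TX_THREAD *first_suspended;
ULONG      suspended_count;

    /* Unwanted fields may be passed as TX_NULL. */
    tx_event_flags_info_get(&sensor_events, &name, &current_flags,
                            &first_suspended, &suspended_count, TX_NULL);
}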

View File

@@ -94,15 +94,15 @@ UINT status;
/* Determine if this is a legal request. */
if (group_ptr == TX_NULL)
{
/* Event flags group pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the event group ID is invalid. */
else if (group_ptr -> tx_event_flags_group_id != TX_EVENT_FLAGS_ID)
{
/* Event flags group pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -124,34 +124,34 @@ UINT status;
/* Retrieve the number of set operations on this event flag group. */
if (sets != TX_NULL)
{
*sets = group_ptr -> tx_event_flags_group_performance_set_count;
}
/* Retrieve the number of get operations on this event flag group. */
if (gets != TX_NULL)
{
*gets = group_ptr -> tx_event_flags_group__performance_get_count;
}
/* Retrieve the number of thread suspensions on this event flag group. */
if (suspensions != TX_NULL)
{
*suspensions = group_ptr -> tx_event_flags_group___performance_suspension_count;
}
/* Retrieve the number of thread timeouts on this event flag group. */
if (timeouts != TX_NULL)
{
*timeouts = group_ptr -> tx_event_flags_group____performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return successful completion. */
status = TX_SUCCESS;
}

View File

@@ -101,37 +101,37 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of event flag set operations. */
if (sets != TX_NULL)
{
*sets = _tx_event_flags_performance_set_count;
}
/* Retrieve the total number of event flag get operations. */
if (gets != TX_NULL)
{
*gets = _tx_event_flags_performance_get_count;
}
/* Retrieve the total number of event flag thread suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_event_flags_performance_suspension_count;
}
/* Retrieve the total number of event flag thread timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_event_flags_performance_timeout_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;

View File

@@ -137,7 +137,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
set request. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
if (group_ptr -> tx_event_flags_group_suspension_list == TX_NULL)
{
@@ -154,15 +154,15 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
event clearing until the set operation is complete. */
/* Remember the events to clear. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear | ~flags_to_set;
}
else
{
#endif
/* Previous set operation was not interrupted, simply clear the
specified flags by "ANDing" the flags into the current events
of the group. */
group_ptr -> tx_event_flags_group_current =
group_ptr -> tx_event_flags_group_current & flags_to_set;
@@ -195,7 +195,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
{
/* Yes, we need to neutralize the delayed clearing as well. */
group_ptr -> tx_event_flags_group_delayed_clear =
group_ptr -> tx_event_flags_group_delayed_clear & ~flags_to_set;
}
#endif
@@ -210,7 +210,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
if (group_ptr -> tx_event_flags_group_suspension_list != TX_NULL)
{
/* Determine if there is just a single thread waiting on the event
flag group. */
if (suspended_count == ((UINT) 1))
{
@@ -223,7 +223,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Pickup the current event flags. */
current_event_flags = group_ptr -> tx_event_flags_group_current;
/* Pickup the suspend information. */
requested_flags = thread_ptr -> tx_thread_suspend_info;
@@ -236,16 +236,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_event_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -256,7 +256,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* OR request is present. Simply AND the requested flags with the current flags to see if any are present. */
flags_satisfied = (current_event_flags & requested_flags);
}
/* Determine if the request is satisfied. */
if (flags_satisfied != ((ULONG) 0))
{
@@ -315,7 +315,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
else
{
/* Otherwise, the event flag requests of multiple threads must be
examined. */
/* Setup thread pointer, keep a local copy of the head pointer. */
@@ -325,7 +325,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Clear the suspended list head pointer to thwart manipulation of
the list in ISR's while we are processing here. */
group_ptr -> tx_event_flags_group_suspension_list = TX_NULL;
/* Setup the satisfied thread pointers. */
satisfied_list = TX_NULL;
last_satisfied = TX_NULL;
@@ -382,16 +382,16 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Check for AND condition. All flags must be present to satisfy request. */
if (and_request == TX_AND)
{
/* AND request is present. */
/* Calculate the flags present. */
flags_satisfied = (current_event_flags & requested_flags);
/* Determine if they satisfy the AND request. */
if (flags_satisfied != requested_flags)
{
/* No, not all the requested flags are present. Clear the flags present variable. */
flags_satisfied = ((ULONG) 0);
}
@@ -402,13 +402,13 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* OR request is present. Simply AND the requested flags with the current flags to see if any are present. */
flags_satisfied = (current_event_flags & requested_flags);
}
/* Check to see if the thread had a timeout or wait abort during the event search processing.
If so, just set the flags satisfied to ensure the processing here removes the thread from
the suspension list. */
if (thread_ptr -> tx_thread_state != TX_EVENT_FLAG)
{
/* Simply set the satisfied flags to 1 in order to remove the thread from the suspension list. */
flags_satisfied = ((ULONG) 1);
}
@@ -421,7 +421,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Set the preempt check flag. */
preempt_check = TX_TRUE;
/* Determine if the thread is still suspended on the event flag group. If not, a wait
abort must have been done from an ISR. */
if (thread_ptr -> tx_thread_state == TX_EVENT_FLAG)
@@ -437,11 +437,11 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Determine whether or not clearing needs to take place. */
if (clear_request == TX_TRUE)
{
/* Yes, clear the flags that satisfied this request. */
group_ptr -> tx_event_flags_group_current = group_ptr -> tx_event_flags_group_current & ~requested_flags;
}
/* Prepare for resumption of the first thread. */
/* Clear cleanup routine to avoid timeout. */
@@ -478,7 +478,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
list. */
if (suspended_list == thread_ptr)
{
/* Yes, head pointer needs to be updated. */
suspended_list = thread_ptr -> tx_thread_suspended_next;
}
@@ -494,7 +494,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* First thread on the satisfied list. */
satisfied_list = thread_ptr;
last_satisfied = thread_ptr;
/* Setup initial next pointer. */
thread_ptr -> tx_thread_suspended_next = TX_NULL;
}
@@ -502,7 +502,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
{
/* Not the first thread on the satisfied list. */
/* Link it up at the end. */
last_satisfied -> tx_thread_suspended_next = thread_ptr;
thread_ptr -> tx_thread_suspended_next = TX_NULL;
@@ -515,7 +515,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Decrement the suspension count. */
suspended_count--;
} while (suspended_count != TX_NO_SUSPENSIONS);
/* Setup the group's suspension list head again. */
@@ -543,7 +543,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
thread_ptr = satisfied_list;
while(thread_ptr != TX_NULL)
{
/* Get next pointer first. */
next_thread_ptr = thread_ptr -> tx_thread_suspended_next;
@@ -586,7 +586,7 @@ VOID (*events_set_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *notify_
/* Determine if we need to set the reset search field. */
if (group_ptr -> tx_event_flags_group_suspended_count != TX_NO_SUSPENSIONS)
{
/* We interrupted a search of an event flag group suspension
list. Make sure we reset the search. */
group_ptr -> tx_event_flags_group_reset_search = TX_TRUE;
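
The application drives all of the above through tx_event_flags_set: TX_OR sets flags and wakes any waiter whose request becomes satisfied, while TX_AND clears flags, which is where the delayed-clear handling applies. A sketch assuming the standard prototype, reusing the hypothetical flag and group from earlier:

#include "tx_api.h"

#define FLAG_RX_DONE   ((ULONG) 0x01)    /* hypothetical application flag */

extern TX_EVENT_FLAGS_GROUP sensor_events;    /* hypothetical group */

VOID rx_complete_notify(VOID)
{
    /* OR the flag into the group; satisfied waiters are resumed by the
       processing shown above. */
    tx_event_flags_set(&sensor_events, FLAG_RX_DONE, TX_OR);
}

VOID clear_rx_flag(VOID)
{
    /* TX_AND keeps only the flags present in the mask, i.e. it clears FLAG_RX_DONE. */
    tx_event_flags_set(&sensor_events, ~FLAG_RX_DONE, TX_AND);
}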

View File

@@ -44,9 +44,9 @@
#include "tx_byte_pool.h"
/* Define the unused memory pointer. The value of the first available
memory address is placed in this variable in the low-level
initialization function. The content of this variable is passed
to the application's system definition function. */
VOID *_tx_initialize_unused_memory;

View File

@@ -98,8 +98,8 @@ VOID _tx_initialize_kernel_enter(VOID)
/* No, the initialization still needs to take place. */
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -109,9 +109,9 @@ VOID _tx_initialize_kernel_enter(VOID)
/* Invoke the low-level initialization to handle all processor specific
initialization issues. */
_tx_initialize_low_level();
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
function. */
_tx_initialize_high_level();
@@ -122,8 +122,8 @@ VOID _tx_initialize_kernel_enter(VOID)
/* Optional processing extension. */
TX_INITIALIZE_KERNEL_ENTER_EXTENSION
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -131,7 +131,7 @@ VOID _tx_initialize_kernel_enter(VOID)
first available memory address to it. */
tx_application_define(_tx_initialize_unused_memory);
/* Set the system state in preparation for entering the thread
scheduler. */
_tx_thread_system_state = TX_INITIALIZE_IS_FINISHED;
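
Seen from the application, the sequence above runs inside tx_kernel_enter(): low-level and high-level initialization, a single call to tx_application_define() with the first unused memory address, then the scheduler. A conventional sketch under those standard entry points; the pool object, name, and size are hypothetical:

#include "tx_api.h"

TX_BYTE_POOL app_pool;    /* hypothetical system memory pool */

int main(void)
{
    /* Never returns; performs the initialization sequence above and starts scheduling. */
    tx_kernel_enter();
    return 0;
}

VOID tx_application_define(VOID *first_unused_memory)
{
    /* Called exactly once with the unused memory pointer; typically used to
       carve out system objects such as a byte pool for stacks and queues. */
    tx_byte_pool_create(&app_pool, "application pool", first_unused_memory, 65536);
}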

View File

@@ -76,8 +76,8 @@
VOID _tx_initialize_kernel_setup(VOID)
{
/* Ensure that the system state variable is set to indicate
initialization is in progress. Note that this variable is
later used to represent interrupt nesting. */
_tx_thread_system_state = TX_INITIALIZE_IN_PROGRESS;
@@ -87,9 +87,9 @@ VOID _tx_initialize_kernel_setup(VOID)
/* Invoke the low-level initialization to handle all processor specific
initialization issues. */
_tx_initialize_low_level();
/* Invoke the high-level initialization to exercise all of the
ThreadX components and the application's initialization
function. */
_tx_initialize_high_level();

View File

@@ -92,7 +92,7 @@ ULONG _tx_misra_uchar_pointer_dif(UCHAR *ptr1, UCHAR *ptr2)
{
ULONG value;
value = (ULONG)(ptr1 - ptr2);
return(value);
}
@@ -150,7 +150,7 @@ ULONG *_tx_misra_ulong_pointer_sub(ULONG *ptr, ULONG amount)
ULONG _tx_misra_ulong_pointer_dif(ULONG *ptr1, ULONG *ptr2)
{
ULONG value;
value = (ULONG)(ptr1 - ptr2);
return(value);
}
@@ -362,7 +362,7 @@ TX_THREAD *trace_thread_ptr;
#endif
trace_event_ptr++;
if (trace_event_ptr >= _tx_trace_buffer_end_ptr)
{
trace_event_ptr = _tx_trace_buffer_start_ptr;
_tx_trace_buffer_current_ptr = trace_event_ptr;
_tx_trace_header_ptr -> tx_trace_header_buffer_current_pointer = (ULONG) trace_event_ptr;
@@ -813,7 +813,7 @@ UCHAR *_tx_misra_entry_to_uchar_pointer_convert(TX_TRACE_BUFFER_ENTRY *pointer)
/* Return a UCHAR pointer. */
return((UCHAR *) ((VOID *) pointer));
}
#endif

View File

@@ -100,14 +100,14 @@ TX_THREAD *previous_thread;
/* Check for valid suspension sequence. */
if (suspension_sequence == thread_ptr -> tx_thread_suspension_sequence)
{
/* Setup pointer to mutex control block. */
mutex_ptr = TX_VOID_TO_MUTEX_POINTER_CONVERT(thread_ptr -> tx_thread_suspend_control_block);
/* Check for NULL mutex pointer. */
if (mutex_ptr != TX_NULL)
{
/* Determine if the mutex ID is valid. */
if (mutex_ptr -> tx_mutex_id == TX_MUTEX_ID)
{
@@ -133,7 +133,7 @@ TX_THREAD *previous_thread;
suspended_count = mutex_ptr -> tx_mutex_suspended_count;
/* Remove the suspended thread from the list. */
/* See if this is the only suspended thread on the list. */
if (suspended_count == TX_NO_SUSPENSIONS)
{
@@ -147,7 +147,7 @@ TX_THREAD *previous_thread;
{
/* At least one more thread is on the same suspension list. */
/* Update the links of the adjacent threads. */
next_thread = thread_ptr -> tx_thread_suspended_next;
previous_thread = thread_ptr -> tx_thread_suspended_previous;
@@ -157,18 +157,18 @@ TX_THREAD *previous_thread;
/* Determine if we need to update the head pointer. */
if (mutex_ptr -> tx_mutex_suspension_list == thread_ptr)
{
/* Update the list head pointer. */
mutex_ptr -> tx_mutex_suspension_list = next_thread;
}
}
/* Now we need to determine if this cleanup is from a terminate, timeout,
or from a wait abort. */
if (thread_ptr -> tx_thread_state == TX_MUTEX_SUSP)
{
/* Timeout condition and the thread still suspended on the mutex.
Setup return error status and resume the thread. */
#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO
@@ -194,7 +194,7 @@ TX_THREAD *previous_thread;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread! */
_tx_thread_system_resume(thread_ptr);
@@ -208,7 +208,7 @@ TX_THREAD *previous_thread;
}
}
}
/* Restore interrupts. */
TX_RESTORE
#endif
@@ -269,21 +269,21 @@ UINT status;
/* Disable interrupts. */
TX_DISABLE
/* Temporarily disable preemption. */
_tx_thread_preempt_disable++;
/* Loop to look at all the mutexes. */
do
{
/* Pickup the mutex head pointer. */
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
/* Determine if there is a mutex. */
if (mutex_ptr != TX_NULL)
{
/* Yes, set the ownership count to 1. */
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
@@ -307,10 +307,10 @@ UINT status;
mutex_ptr = thread_ptr -> tx_thread_owned_mutex_list;
}
} while (mutex_ptr != TX_NULL);
/* Restore preemption. */
_tx_thread_preempt_disable--;
/* Restore interrupts. */
TX_RESTORE
}
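
The timeout path above is what a caller observes as a TX_NOT_AVAILABLE return from a bounded tx_mutex_get, while the loop that resets the ownership count to 1 is what releases a terminated thread's mutexes. A caller-side sketch of the timeout case, assuming the standard tx_mutex_get/tx_mutex_put prototypes; the mutex object and timeout are hypothetical:

#include "tx_api.h"

extern TX_MUTEX config_mutex;    /* hypothetical mutex created elsewhere */

VOID try_update_config(VOID)
{
    /* Wait at most 50 ticks for ownership. */
    if (tx_mutex_get(&config_mutex, 50) == TX_SUCCESS)
    {
        /* ... critical section ... */
        tx_mutex_put(&config_mutex);
    }
    else
    {
        /* TX_NOT_AVAILABLE: the wait expired and the cleanup above ran. */
    }
}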

View File

@@ -88,7 +88,7 @@ TX_MUTEX *previous_mutex;
/* Setup the basic mutex fields. */
mutex_ptr -> tx_mutex_name = name_ptr;
mutex_ptr -> tx_mutex_inherit = inherit;
/* Disable interrupts to place the mutex on the created list. */
TX_DISABLE
@@ -126,7 +126,7 @@ TX_MUTEX *previous_mutex;
/* Increment the ownership count. */
_tx_mutex_created_count++;
/* Optional mutex create extended processing. */
TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
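
The inherit field stored above comes directly from the create call's third argument. A sketch creating a priority-inheritance mutex, assuming the standard tx_mutex_create prototype; the object and name are hypothetical:

#include "tx_api.h"

TX_MUTEX config_mutex;    /* hypothetical application mutex */

VOID create_config_mutex(VOID)
{
    /* TX_INHERIT enables priority inheritance; TX_NO_INHERIT disables it. */
    if (tx_mutex_create(&config_mutex, "config mutex", TX_INHERIT) != TX_SUCCESS)
    {
        /* Handle the create failure. */
    }
}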

View File

@@ -110,7 +110,7 @@ UINT status;
/* Decrement the created count. */
_tx_mutex_created_count--;
/* See if the mutex is the only one on the list. */
if (_tx_mutex_created_count == TX_EMPTY)
{
@@ -130,7 +130,7 @@ UINT status;
/* See if we have to update the created list head pointer. */
if (_tx_mutex_created_ptr == mutex_ptr)
{
/* Yes, move the head pointer to the next link. */
_tx_mutex_created_ptr = next_mutex;
}
@@ -156,7 +156,7 @@ UINT status;
{
/* Yes, remove this mutex from the owned list. */
/* Set the ownership count to 1. */
mutex_ptr -> tx_mutex_ownership_count = ((UINT) 1);
@@ -184,14 +184,14 @@ UINT status;
on this mutex. */
while (suspended_count != ((ULONG) 0))
{
/* Decrement the suspension count. */
suspended_count--;
/* Lockout interrupts. */
TX_DISABLE
/* Clear the cleanup pointer, this prevents the timeout from doing
anything. */
thread_ptr -> tx_thread_suspend_cleanup = TX_NULL;
@@ -215,7 +215,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Resume the thread. */
_tx_thread_system_resume(thread_ptr);
#endif

View File

@@ -126,7 +126,7 @@ UINT status;
/* Determine if priority inheritance is required. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
/* Remember the current priority of thread. */
mutex_ptr -> tx_mutex_original_priority = thread_ptr -> tx_thread_priority;
@@ -178,7 +178,7 @@ UINT status;
else if (mutex_ptr -> tx_mutex_owner == thread_ptr)
{
/* The owning thread is requesting the mutex again, just
increment the ownership count. */
mutex_ptr -> tx_mutex_ownership_count++;
@@ -279,7 +279,7 @@ UINT status;
previous_thread -> tx_thread_suspended_next = thread_ptr;
next_thread -> tx_thread_suspended_previous = thread_ptr;
}
/* Increment the suspension count. */
mutex_ptr -> tx_mutex_suspended_count++;
@@ -288,7 +288,7 @@ UINT status;
#ifdef TX_NOT_INTERRUPTABLE
/* Determine if we need to raise the priority of the thread
owning the mutex. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
@@ -304,7 +304,7 @@ UINT status;
/* Determine if we have to update inherit priority level of the mutex owner. */
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
{
/* Remember the new priority inheritance priority. */
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
}
@@ -347,7 +347,7 @@ UINT status;
/* Restore interrupts. */
TX_RESTORE
/* Determine if we need to raise the priority of the thread
owning the mutex. */
if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
{
@@ -363,7 +363,7 @@ UINT status;
/* Determine if we have to update inherit priority level of the mutex owner. */
if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_inherit_priority)
{
/* Remember the new priority inheritance priority. */
mutex_owner -> tx_thread_inherit_priority = thread_ptr -> tx_thread_priority;
}
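
The effect of the inheritance checks above is that a low-priority owner of a TX_INHERIT mutex is temporarily boosted to the highest priority of any thread blocked on it, and tx_mutex_get is recursive for the current owner. A scenario sketch under those assumptions; the thread entry function and mutex are hypothetical:

#include "tx_api.h"

extern TX_MUTEX config_mutex;    /* hypothetical mutex created with TX_INHERIT */

/* Low-priority worker: while it owns the mutex, a blocked higher-priority
   thread raises this thread's priority until the final tx_mutex_put. */
VOID worker_thread_entry(ULONG input)
{
    (void) input;    /* unused entry parameter */

    tx_mutex_get(&config_mutex, TX_WAIT_FOREVER);

    /* A nested get by the same owner just bumps the ownership count. */
    tx_mutex_get(&config_mutex, TX_WAIT_FOREVER);
    tx_mutex_put(&config_mutex);

    /* The final put releases the mutex and restores the original priority. */
    tx_mutex_put(&config_mutex);
}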

View File

@@ -79,7 +79,7 @@
/* */
/**************************************************************************/
UINT _tx_mutex_info_get(TX_MUTEX *mutex_ptr, CHAR **name, ULONG *count, TX_THREAD **owner,
TX_THREAD **first_suspended, ULONG *suspended_count,
TX_MUTEX **next_mutex)
{
@@ -101,45 +101,45 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the name of the mutex. */
if (name != TX_NULL)
{
*name = mutex_ptr -> tx_mutex_name;
}
/* Retrieve the current ownership count of the mutex. */
if (count != TX_NULL)
{
*count = ((ULONG) mutex_ptr -> tx_mutex_ownership_count);
}
/* Retrieve the current owner of the mutex. */
if (owner != TX_NULL)
{
*owner = mutex_ptr -> tx_mutex_owner;
}
/* Retrieve the first thread suspended on this mutex. */
if (first_suspended != TX_NULL)
{
*first_suspended = mutex_ptr -> tx_mutex_suspension_list;
}
/* Retrieve the number of threads suspended on this mutex. */
if (suspended_count != TX_NULL)
{
*suspended_count = (ULONG) mutex_ptr -> tx_mutex_suspended_count;
}
/* Retrieve the pointer to the next mutex created. */
if (next_mutex != TX_NULL)
{
*next_mutex = mutex_ptr -> tx_mutex_created_next;
}
/* Restore interrupts. */
TX_RESTORE

View File

@@ -99,15 +99,15 @@ UINT status;
/* Determine if this is a legal request. */
if (mutex_ptr == TX_NULL)
{
/* Mutex pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
/* Determine if the mutex ID is invalid. */
else if (mutex_ptr -> tx_mutex_id != TX_MUTEX_ID)
{
/* Mutex pointer is illegal, return error. */
status = TX_PTR_ERROR;
}
@@ -129,45 +129,45 @@ UINT status;
/* Retrieve the number of puts on this mutex. */
if (puts != TX_NULL)
{
*puts = mutex_ptr -> tx_mutex_performance_put_count;
}
/* Retrieve the number of gets on this mutex. */
if (gets != TX_NULL)
{
*gets = mutex_ptr -> tx_mutex_performance_get_count;
}
/* Retrieve the number of suspensions on this mutex. */
if (suspensions != TX_NULL)
{
*suspensions = mutex_ptr -> tx_mutex_performance_suspension_count;
}
/* Retrieve the number of timeouts on this mutex. */
if (timeouts != TX_NULL)
{
*timeouts = mutex_ptr -> tx_mutex_performance_timeout_count;
}
/* Retrieve the number of priority inversions on this mutex. */
if (inversions != TX_NULL)
{
*inversions = mutex_ptr -> tx_mutex_performance_priority_inversion_count;
}
/* Retrieve the number of priority inheritances on this mutex. */
if (inheritances != TX_NULL)
{
*inheritances = mutex_ptr -> tx_mutex_performance__priority_inheritance_count;
}
/* Restore interrupts. */
TX_RESTORE
}
@@ -225,7 +225,7 @@ UINT status;
status = TX_FEATURE_NOT_ENABLED;
}
#endif
/* Return completion status. */
return(status);
}

View File

@@ -82,7 +82,7 @@
/* resulting in version 6.1 */
/* */
/**************************************************************************/
UINT _tx_mutex_performance_system_info_get(ULONG *puts, ULONG *gets, ULONG *suspensions,
ULONG *timeouts, ULONG *inversions, ULONG *inheritances)
{
@@ -106,51 +106,51 @@ TX_INTERRUPT_SAVE_AREA
/* Retrieve the total number of mutex puts. */
if (puts != TX_NULL)
{
*puts = _tx_mutex_performance_put_count;
}
/* Retrieve the total number of mutex gets. */
if (gets != TX_NULL)
{
*gets = _tx_mutex_performance_get_count;
}
/* Retrieve the total number of mutex suspensions. */
if (suspensions != TX_NULL)
{
*suspensions = _tx_mutex_performance_suspension_count;
}
/* Retrieve the total number of mutex timeouts. */
if (timeouts != TX_NULL)
{
*timeouts = _tx_mutex_performance_timeout_count;
}
/* Retrieve the total number of mutex priority inversions. */
if (inversions != TX_NULL)
{
*inversions = _tx_mutex_performance_priority_inversion_count;
}
/* Retrieve the total number of mutex priority inheritances. */
if (inheritances != TX_NULL)
{
*inheritances = _tx_mutex_performance__priority_inheritance_count;
}
/* Restore interrupts. */
TX_RESTORE
/* Return completion status. */
return(TX_SUCCESS);
#else
UINT status;
@@ -159,43 +159,43 @@ UINT status;
/* Access input arguments just for the sake of lint, MISRA, etc. */
if (puts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (gets != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (suspensions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (timeouts != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (inversions != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else if (inheritances != TX_NULL)
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}
else
{
/* Not enabled, return error. */
status = TX_FEATURE_NOT_ENABLED;
}

View File

@@ -164,28 +164,28 @@ UINT status;
/* Disable interrupts again. */
TX_DISABLE
/* Determine if any changes to the list have occurred while
interrupts were enabled. */
/* Is the list head the same? */
if (head_ptr != mutex_ptr -> tx_mutex_suspension_list)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
else
{
/* Is the suspended count the same? */
if (suspended_count != mutex_ptr -> tx_mutex_suspended_count)
{
/* The list head has changed, set the list changed flag. */
list_changed = TX_TRUE;
}
}
/* Determine if the list has changed. */
if (list_changed == TX_FALSE)
{
@@ -215,12 +215,12 @@ UINT status;
/* Release preemption. */
_tx_thread_preempt_disable--;
/* Now determine if the highest priority thread is at the front
of the list. */
if (priority_thread_ptr != head_ptr)
{
/* No, we need to move the highest priority suspended thread to the
front of the list. */
/* First, remove the highest priority thread by updating the

Some files were not shown because too many files have changed in this diff.