[core] decouple main loop cadence from scheduler wake timing (#15792)

Author: J. Nick Koston
Committed: 2026-04-21 14:48:21 +02:00 (via GitHub)
Parent: 1504ac3d19
Commit: e4f413adad
15 changed files with 644 additions and 62 deletions
@@ -137,11 +137,12 @@ bool RuntimeStatsCollector::compare_total_time(Component *a, Component *b) {
return a->runtime_stats_.total_time_us > b->runtime_stats_.total_time_us;
}
void RuntimeStatsCollector::process_pending_stats(uint32_t current_time) {
if ((int32_t) (current_time - this->next_log_time_) >= 0) {
this->log_stats_();
this->next_log_time_ = current_time + this->log_interval_;
}
// Slow path for process_pending_stats — gate already checked by the inline
// wrapper in runtime_stats.h. Out-of-line keeps the log_stats_ machinery out
// of Application::loop().
void RuntimeStatsCollector::process_pending_stats_slow_(uint32_t current_time) {
this->log_stats_();
this->next_log_time_ = current_time + this->log_interval_;
}
} // namespace runtime_stats
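The time gate above compares timestamps via a signed cast of an unsigned difference so it stays correct across the uint32_t millis() rollover. A minimal standalone sketch of the same idiom (the names here are illustrative, not part of this commit):

#include <cassert>
#include <cstdint>

// True once `now` has reached or passed `deadline`, even when the 32-bit
// millisecond counter has wrapped. Valid while the two timestamps are less
// than 2^31 ms (~24.8 days) apart.
static bool deadline_reached(uint32_t now, uint32_t deadline) {
  return (int32_t) (now - deadline) >= 0;
}

int main() {
  const uint32_t deadline = 0xFFFFFFF0u;               // just before rollover
  assert(!deadline_reached(deadline - 5, deadline));   // not yet due
  assert(deadline_reached(deadline + 20, deadline));   // due, despite the wrap
  return 0;
}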
@@ -6,6 +6,7 @@
#include <cstdint>
#include "esphome/core/hal.h"
#include "esphome/core/helpers.h"
#include "esphome/core/log.h"
namespace esphome {
@@ -26,14 +27,24 @@ class RuntimeStatsCollector {
}
uint32_t get_log_interval() const { return this->log_interval_; }
// Process any pending stats printing (should be called after component loop)
void process_pending_stats(uint32_t current_time);
// Process any pending stats printing. Called on every Application::loop()
// tick, so the common "not yet time to log" path must be cheap — inline
// the gate check and keep the actual logging work out-of-line.
void ESPHOME_ALWAYS_INLINE process_pending_stats(uint32_t current_time) {
if ((int32_t) (current_time - this->next_log_time_) >= 0) [[unlikely]] {
this->process_pending_stats_slow_(current_time);
}
}
// Record the wall time of one main loop iteration excluding the yield/sleep.
// Called once per loop from Application::loop().
// active_us = total time between loop start and just before yield.
// before_us = time spent in before_loop_tasks_ (scheduler + ISR enable_loop).
// tail_us = time spent in after_loop_tasks_ + the trailing record/stats prefix.
// before_us = time spent in Phase A (scheduler tick) excluding time
// already attributed to per-component stats.
// tail_us = time spent in after_component_phase_ + the trailing record/stats
// prefix. Only meaningful on component-phase ticks; reported
// as 0 on Phase A-only ticks (no component phase ran, so any
// overhead between Phase A and stats belongs to "residual").
// Residual overhead at log time = active - Σ(component) - before - tail,
// which captures per-iteration inter-component bookkeeping (set_current_component,
// WarnIfComponentBlockingGuard construction/destruction, feed_wdt_with_time calls,
@@ -55,6 +66,7 @@ class RuntimeStatsCollector {
}
protected:
void process_pending_stats_slow_(uint32_t current_time);
void log_stats_();
// Static comparators — member functions have friend access, lambdas do not
static bool compare_period_time(Component *a, Component *b);
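As a worked illustration of the residual bucket described in the comments above (numbers invented for the example): if one component-phase iteration measured active = 1200 µs, Σ(component) = 900 µs, before = 150 µs and tail = 50 µs, then the residual charged to inter-component bookkeeping is 1200 - 900 - 150 - 50 = 100 µs.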
@@ -93,8 +93,11 @@ void Application::setup() {
do {
uint32_t now = millis();
// Process pending loop enables to handle GPIO interrupts during setup
this->before_loop_tasks_(now);
// Service scheduler and process pending loop enables to handle GPIO
// interrupts during setup. During setup we always run the component
// phase (no loop_interval_ gate), so call both helpers unconditionally.
this->scheduler_tick_(now);
this->before_component_phase_();
for (uint32_t j = 0; j <= i; j++) {
// Update loop_component_start_time_ right before calling each component
@@ -103,7 +106,7 @@ void Application::setup() {
this->feed_wdt();
}
this->after_loop_tasks_();
this->after_component_phase_();
yield();
} while (!component->can_proceed() && !component->is_failed());
}
@@ -426,8 +426,9 @@ class Application {
void enable_component_loop_(Component *component);
void enable_pending_loops_();
void activate_looping_component_(uint16_t index);
inline uint32_t ESPHOME_ALWAYS_INLINE before_loop_tasks_(uint32_t loop_start_time);
inline void ESPHOME_ALWAYS_INLINE after_loop_tasks_() { this->in_loop_ = false; }
inline uint32_t ESPHOME_ALWAYS_INLINE scheduler_tick_(uint32_t now);
inline void ESPHOME_ALWAYS_INLINE before_component_phase_();
inline void ESPHOME_ALWAYS_INLINE after_component_phase_() { this->in_loop_ = false; }
/// Process dump_config output one component per loop iteration.
/// Extracted from loop() to keep cold startup/reconnect logging out of the hot path.
@@ -582,16 +583,25 @@ inline void Application::drain_wake_notifications_() {
}
#endif // USE_HOST
inline uint32_t ESPHOME_ALWAYS_INLINE Application::before_loop_tasks_(uint32_t loop_start_time) {
// Phase A: drain wake notifications and run the scheduler. Invoked on every
// Application::loop() tick regardless of whether a component phase runs, so
// scheduler items fire at their requested cadence even when the caller has
// raised loop_interval_ for power savings (see Application::loop()).
// Returns the timestamp of the last scheduler item that ran (or `now`
// unchanged if none ran), so the caller's WDT feed stays monotonic with the
// per-item feeds inside scheduler.call() without an extra millis().
inline uint32_t ESPHOME_ALWAYS_INLINE Application::scheduler_tick_(uint32_t now) {
#ifdef USE_HOST
// Drain wake notifications first to clear socket for next wake
this->drain_wake_notifications_();
#endif
return this->scheduler.call(now);
}
// Scheduler::call feeds the WDT per item and returns the timestamp of the
// last fired item, or the input unchanged when nothing ran.
uint32_t last_op_end_time = this->scheduler.call(loop_start_time);
// Phase B entry: only invoked when a component loop phase is about to run.
// Processes pending enable_loop requests from ISRs and marks in_loop_ so
// reentrant modifications during component.loop() are safe.
inline void ESPHOME_ALWAYS_INLINE Application::before_component_phase_() {
// Process any pending enable_loop requests from ISRs
// This must be done before marking in_loop_ = true to avoid race conditions
if (this->has_pending_enable_loop_requests_) {
@@ -608,7 +618,6 @@ inline uint32_t ESPHOME_ALWAYS_INLINE Application::before_loop_tasks_(uint32_t l
// Mark that we're in the loop for safe reentrant modifications
this->in_loop_ = true;
return last_op_end_time;
}
inline void ESPHOME_ALWAYS_INLINE Application::loop() {
@@ -623,46 +632,77 @@ inline void ESPHOME_ALWAYS_INLINE Application::loop() {
// so charging it again to "before" would double-count.
uint64_t loop_recorded_snap = ComponentRuntimeStats::global_recorded_us;
#endif
// Get the initial loop time at the start
uint32_t last_op_end_time = millis();
// Phase A: always service the scheduler. Decouples scheduler cadence from
// loop_interval_ so raised intervals (for power savings) don't drag scheduled
// items forward. A tick that only runs the scheduler is cheap.
// scheduler_tick_ returns the timestamp of the last scheduler item that ran
// (advanced by its per-item feeds) or `now` unchanged. We adopt it as `now`
// so the gate check and WDT feed both reflect actual elapsed time after
// scheduler dispatch, without an extra millis() call.
uint32_t now = this->scheduler_tick_(millis());
// Guarantee one WDT feed per tick even when the scheduler had nothing to
// dispatch and the component phase is gated out — covers configs with no
// looping components and no scheduler work (setup() has its own
// per-component feed_wdt calls, so only do this here, not in scheduler_tick_).
this->feed_wdt_with_time(now);
// Returned timestamp keeps us monotonic with last_wdt_feed_ (advanced by
// the scheduler's per-item feeds) without an extra millis() call.
last_op_end_time = this->before_loop_tasks_(last_op_end_time);
// Guarantee a WDT touch every tick — covers configs with no looping
// components and no scheduler work, where the per-item / per-component
// feeds never fire. Rate-limited inline fast path, ~free when unneeded.
this->feed_wdt_with_time(last_op_end_time);
#ifdef USE_RUNTIME_STATS
uint32_t loop_before_end_us = micros();
uint64_t loop_before_scheduled_us = ComponentRuntimeStats::global_recorded_us - loop_recorded_snap;
// Only meaningful when do_component_phase is true; initialized to 0 so the
// tail bucket receives 0 on Phase A-only ticks (no component tail happened,
// the gate-check / stats-prefix overhead belongs to "residual", not "tail").
uint32_t loop_tail_start_us = 0;
#endif
for (this->current_loop_index_ = 0; this->current_loop_index_ < this->looping_components_active_end_;
this->current_loop_index_++) {
Component *component = this->looping_components_[this->current_loop_index_];
// Gate the component phase on loop_interval_, an active high-frequency
// request, or an explicit wake from a background producer. A scheduler-only
// wake (e.g. set_interval firing under a raised loop_interval_) leaves the
// component phase gated; an external producer that called wake_loop_*
// (MQTT RX, USB RX, BLE event, etc.) needs the component phase to actually
// run so its component's loop() can drain the queued work — that is the
// long-standing semantic of wake_loop_threadsafe(), and the wake_request
// flag preserves it. wake_request_take() exchange-clears the flag; wakes
// that arrive during Phase B re-set it and run Phase B again on the next
// iteration.
const bool high_frequency = HighFrequencyLoopRequester::is_high_frequency();
const uint32_t elapsed = now - this->last_loop_;
const bool woke = esphome::wake_request_take();
const bool do_component_phase = high_frequency || woke || (elapsed >= this->loop_interval_);
// Update the cached time before each component runs
this->loop_component_start_time_ = last_op_end_time;
if (do_component_phase) {
this->before_component_phase_();
{
this->set_current_component(component);
WarnIfComponentBlockingGuard guard{component, last_op_end_time};
component->loop();
// Use the finish method to get the current time as the end time
last_op_end_time = guard.finish();
uint32_t last_op_end_time = now;
for (this->current_loop_index_ = 0; this->current_loop_index_ < this->looping_components_active_end_;
this->current_loop_index_++) {
Component *component = this->looping_components_[this->current_loop_index_];
// Update the cached time before each component runs
this->loop_component_start_time_ = last_op_end_time;
{
this->set_current_component(component);
WarnIfComponentBlockingGuard guard{component, last_op_end_time};
component->loop();
// Use the finish method to get the current time as the end time
last_op_end_time = guard.finish();
}
this->feed_wdt_with_time(last_op_end_time);
}
this->feed_wdt_with_time(last_op_end_time);
#ifdef USE_RUNTIME_STATS
loop_tail_start_us = micros();
#endif
this->last_loop_ = last_op_end_time;
now = last_op_end_time;
this->after_component_phase_();
}
#ifdef USE_RUNTIME_STATS
uint32_t loop_tail_start_us = micros();
#endif
this->after_loop_tasks_();
#ifdef USE_RUNTIME_STATS
// Process any pending runtime stats printing after all components have run
// This ensures stats printing doesn't affect component timing measurements
// Record per-tick timing on every loop, not just component-phase ticks.
// record_loop_active is a small accumulator; process_pending_stats is an
// inline gate check that early-outs unless now >= next_log_time_.
if (global_runtime_stats != nullptr) {
uint32_t loop_now_us = micros();
// Subtract scheduled-component time from the "before" bucket so it is
@@ -671,25 +711,40 @@ inline void ESPHOME_ALWAYS_INLINE Application::loop() {
uint32_t loop_before_overhead_us = loop_before_wall_us > loop_before_scheduled_us
? loop_before_wall_us - static_cast<uint32_t>(loop_before_scheduled_us)
: 0;
global_runtime_stats->record_loop_active(loop_now_us - loop_active_start_us, loop_before_overhead_us,
loop_now_us - loop_tail_start_us);
global_runtime_stats->process_pending_stats(last_op_end_time);
// tail_us is only defined when Phase B ran; 0 on Phase A-only ticks so the
// stats bucket keeps its "component-phase trailing overhead" meaning.
uint32_t loop_tail_us = do_component_phase ? (loop_now_us - loop_tail_start_us) : 0;
global_runtime_stats->record_loop_active(loop_now_us - loop_active_start_us, loop_before_overhead_us, loop_tail_us);
global_runtime_stats->process_pending_stats(now);
}
#endif
// Use the last component's end time instead of calling millis() again
// Compute sleep: bounded by time-until-next-component-phase and the
// scheduler's next deadline. When a scheduler timer fires it re-enters
// loop(), Phase A services it, and the component phase stays gated by
// loop_interval_. When a background producer calls wake_loop_threadsafe()
// it sets the wake_request flag and wakes select() / the task notification;
// the gate above sees the flag and runs Phase B too so the producer's
// component can drain its queued work without waiting up to loop_interval_.
//
// Re-read HighFrequencyLoopRequester::is_high_frequency() here instead of
// reusing the cached `high_frequency` captured above: a component calling
// HighFrequencyLoopRequester::start() from within its loop() would
// otherwise sit under the stale value and sleep for up to loop_interval_
// before the request took effect. That was fine pre-decoupling (the old
// main loop also called the function fresh at the sleep point) but now
// matters much more — loop_interval_ is a power-saving knob documented
// to accept multi-second values, so the stale path could add seconds of
// latency on an HF request. The call is a trivial atomic read.
uint32_t delay_time = 0;
auto elapsed = last_op_end_time - this->last_loop_;
if (elapsed < this->loop_interval_ && !HighFrequencyLoopRequester::is_high_frequency()) {
delay_time = this->loop_interval_ - elapsed;
uint32_t next_schedule = this->scheduler.next_schedule_in(last_op_end_time).value_or(delay_time);
// next_schedule is max 0.5*delay_time
// otherwise interval=0 schedules result in constant looping with almost no sleep
next_schedule = std::max(next_schedule, delay_time / 2);
delay_time = std::min(next_schedule, delay_time);
if (!HighFrequencyLoopRequester::is_high_frequency()) {
const uint32_t elapsed_since_phase = now - this->last_loop_;
const uint32_t until_phase =
(elapsed_since_phase >= this->loop_interval_) ? 0 : (this->loop_interval_ - elapsed_since_phase);
const uint32_t until_sched = this->scheduler.next_schedule_in(now).value_or(until_phase);
delay_time = std::min(until_phase, until_sched);
}
this->yield_with_select_(delay_time);
this->last_loop_ = last_op_end_time;
if (this->dump_config_at_ < this->components_.size()) {
this->process_dump_config_();
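The loop() rewrite above reduces to three steps per tick: Phase A always runs the scheduler, Phase B is gated on loop_interval_ or a wake, and the sleep is bounded by whichever deadline comes first. A self-contained toy simulation of that structure (all names and numbers are illustrative stand-ins, not ESPHome code) reproduces the cadences the integration tests below assert on:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uint32_t now = 0;                      // simulated millis()
  const uint32_t loop_interval = 500;    // raised for power savings
  const uint32_t sched_interval = 50;    // fast set_interval-style item
  uint32_t last_component_phase = 0;
  uint32_t next_sched_deadline = sched_interval;
  int sched_fires = 0, component_phases = 0;

  while (now <= 2000) {
    // Phase A: always service the scheduler, independent of loop_interval.
    if (now >= next_sched_deadline) {
      ++sched_fires;
      next_sched_deadline += sched_interval;
    }
    // Phase B: component phase gated on loop_interval (no wake request here).
    if (now - last_component_phase >= loop_interval) {
      ++component_phases;
      last_component_phase = now;
    }
    // Sleep until the nearer deadline; no max(..., delay/2) floor that would
    // pull the component phase forward.
    const uint32_t until_phase = loop_interval - (now - last_component_phase);
    const uint32_t until_sched = next_sched_deadline - now;
    now += std::min(until_phase, until_sched);
  }
  // Prints "sched=40 component=4": the scheduler keeps its 50 ms cadence
  // while the component phase stays at the 500 ms loop interval.
  printf("sched=%d component=%d\n", sched_fires, component_phases);
  return 0;
}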
@@ -12,9 +12,22 @@
namespace esphome {
// === Wake-requested flag storage ===
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
std::atomic<uint8_t> g_wake_requested{0};
#else
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
volatile uint8_t g_wake_requested = 0;
#endif
// === ESP32 / LibreTiny — IRAM_ATTR entry points ===
#if defined(USE_ESP32) || defined(USE_LIBRETINY)
void IRAM_ATTR wake_loop_isrsafe(BaseType_t *px_higher_priority_task_woken) {
// ISR-safe: set flag before notify so the wake is visible on the next gate
// check. wake_request_set() is just an aligned 8-bit store / atomic store
// and is safe from IRAM.
wake_request_set();
esphome_main_task_notify_from_isr(px_higher_priority_task_woken);
}
void IRAM_ATTR wake_loop_any_context() { wake_main_task_any_context(); }
@@ -72,6 +85,9 @@ void wakeable_delay(uint32_t ms) {
// === Host (UDP loopback socket) ===
#ifdef USE_HOST
void wake_loop_threadsafe() {
// Set flag before sending so the consumer's gate check on the next loop()
// entry observes the wake regardless of select() scheduling.
wake_request_set();
if (App.wake_socket_fd_ >= 0) {
const char dummy = 1;
::send(App.wake_socket_fd_, &dummy, 1, 0);
@@ -7,6 +7,10 @@
#include "esphome/core/defines.h"
#include "esphome/core/hal.h"
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
#include <atomic>
#endif
#if defined(USE_ESP32) || defined(USE_LIBRETINY)
#include "esphome/core/main_task.h"
#endif
@@ -25,12 +29,48 @@ namespace esphome {
extern volatile bool g_main_loop_woke;
#endif
// === wake_request flag — signals Application::loop() that a producer queued
// work for some component's loop() to drain (MQTT RX, USB RX, BLE event, etc.)
// and the component phase should run this tick instead of being held off by
// the loop_interval_ gate. Set by every wake_loop_* entry point; consumed
// (via exchange-and-clear) at the gate in Application::loop(). ===
//
// std::atomic<uint8_t> rather than std::atomic<bool> because GCC on Xtensa
// generates an indirect function call for atomic<bool> ops instead of inlining
// them — same workaround applied in scheduler.h for the SchedulerItem::remove
// flag. On non-atomic platforms a volatile uint8_t suffices: 8-bit aligned
// loads/stores are atomic on every supported MCU, and the platform signal
// that follows wake_request_set() (FreeRTOS task-notify, esp_schedule, socket
// send) provides the cross-thread/cross-core memory barrier.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
extern std::atomic<uint8_t> g_wake_requested;
__attribute__((always_inline)) inline void wake_request_set() { g_wake_requested.store(1, std::memory_order_release); }
__attribute__((always_inline)) inline bool wake_request_take() {
return g_wake_requested.exchange(0, std::memory_order_acquire) != 0;
}
#else
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
extern volatile uint8_t g_wake_requested;
__attribute__((always_inline)) inline void wake_request_set() { g_wake_requested = 1; }
__attribute__((always_inline)) inline bool wake_request_take() {
uint8_t v = g_wake_requested;
g_wake_requested = 0;
return v != 0;
}
#endif
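The set/take pair above is consumed with an exchange-and-clear, so a wake that arrives while Phase B is already running is not lost; it stays set and re-runs Phase B on the next tick. A compressed, runnable sketch of that handshake (illustrative names, not the actual entry points):

#include <atomic>
#include <cstdint>

// Stand-in for g_wake_requested on an atomics-capable platform.
static std::atomic<uint8_t> wake_flag{0};

// Producer side (any thread or ISR): set the flag first, then do the
// platform notify (task notification / esp_schedule / socket send / SEV).
void producer_wake() { wake_flag.store(1, std::memory_order_release); }

// Consumer side (once per Application::loop() tick): exchange(0) reads and
// clears in one atomic step, so a wake landing between "read" and "clear"
// cannot be dropped; it is either observed now or left set for the next tick.
bool consumer_take() { return wake_flag.exchange(0, std::memory_order_acquire) != 0; }

int main() {
  producer_wake();
  bool first = consumer_take();    // true: wake observed and cleared
  bool second = consumer_take();   // false: already consumed
  return (first && !second) ? 0 : 1;
}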
// === ESP32 / LibreTiny (FreeRTOS) ===
#if defined(USE_ESP32) || defined(USE_LIBRETINY)
/// Wake the main loop from any context (ISR or task).
/// always_inline so callers placed in IRAM keep the whole wake path in IRAM.
__attribute__((always_inline)) inline void wake_main_task_any_context() {
// Set the wake-requested flag BEFORE the task notification so the consumer
// (Application::loop() gate) is guaranteed to see it on its next gate check.
wake_request_set();
if (in_isr_context()) {
BaseType_t px_higher_priority_task_woken = pdFALSE;
esphome_main_task_notify_from_isr(&px_higher_priority_task_woken);
@@ -50,7 +90,10 @@ __attribute__((always_inline)) inline void wake_main_task_any_context() {
void wake_loop_isrsafe(BaseType_t *px_higher_priority_task_woken);
void wake_loop_any_context();
inline void wake_loop_threadsafe() { esphome_main_task_notify(); }
inline void wake_loop_threadsafe() {
wake_request_set();
esphome_main_task_notify();
}
namespace internal {
inline void wakeable_delay(uint32_t ms) {
@@ -67,6 +110,9 @@ inline void wakeable_delay(uint32_t ms) {
/// Inline implementation — IRAM callers inline this directly.
inline void ESPHOME_ALWAYS_INLINE wake_loop_impl() {
// Set the wake-requested flag BEFORE esp_schedule so the consumer is
// guaranteed to see it on its next gate check.
wake_request_set();
g_main_loop_woke = true;
esp_schedule();
}
@@ -98,6 +144,9 @@ inline void wakeable_delay(uint32_t ms) {
#elif defined(USE_RP2040)
inline void wake_loop_any_context() {
// Set the wake-requested flag BEFORE the SEV so the consumer is guaranteed
// to see it on its next gate check.
wake_request_set();
g_main_loop_woke = true;
__sev();
}
@@ -0,0 +1,19 @@
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.const import CONF_ID
CODEOWNERS = ["@esphome/tests"]
wake_test_component_ns = cg.esphome_ns.namespace("wake_test_component")
WakeTestComponent = wake_test_component_ns.class_("WakeTestComponent", cg.Component)
CONFIG_SCHEMA = cv.Schema(
{
cv.GenerateID(): cv.declare_id(WakeTestComponent),
}
).extend(cv.COMPONENT_SCHEMA)
async def to_code(config):
var = cg.new_Pvariable(config[CONF_ID])
await cg.register_component(var, config)
@@ -0,0 +1,19 @@
#include "wake_test_component.h"
#include "esphome/core/application.h"
#include "esphome/core/log.h"
#include <chrono>
#include <thread>
namespace esphome::wake_test_component {
static const char *const TAG = "wake_test_component";
void WakeTestComponent::start_async_wake() {
ESP_LOGI(TAG, "Spawning async wake thread (50ms delay)");
std::thread([] {
std::this_thread::sleep_for(std::chrono::milliseconds(50));
App.wake_loop_threadsafe();
}).detach();
}
} // namespace esphome::wake_test_component
@@ -0,0 +1,27 @@
#pragma once
#include "esphome/core/component.h"
#include <atomic>
namespace esphome::wake_test_component {
class WakeTestComponent : public Component {
public:
void setup() override {}
void loop() override { this->loop_count_.fetch_add(1, std::memory_order_relaxed); }
int get_loop_count() const { return this->loop_count_.load(std::memory_order_relaxed); }
// Spawn a detached thread that sleeps briefly then calls
// App.wake_loop_threadsafe(). Used by the integration test to verify a
// cross-thread wake forces a component-phase iteration even when
// loop_interval_ has been raised high enough to gate it off otherwise.
void start_async_wake();
float get_setup_priority() const override { return setup_priority::DATA; }
protected:
std::atomic<int> loop_count_{0};
};
} // namespace esphome::wake_test_component
@@ -0,0 +1,60 @@
esphome:
name: loop-interval-decouple
on_boot:
priority: -100
then:
- lambda: |-
// Raise loop_interval_ to 500ms. With the decoupling fix the
// component phase should run ~twice per second while the 50ms
// scheduler interval below still fires at its requested cadence.
App.set_loop_interval(500);
# Start measurement after 1s so boot transients settle.
- delay: 1000ms
- lambda: |-
id(loop_at_start) = id(loop_counter)->get_loop_count();
id(sched_at_start) = id(sched_count);
ESP_LOGI("test", "MEASUREMENT_STARTED loop=%d sched=%d",
id(loop_at_start), id(sched_at_start));
# Observe for 2s.
- delay: 2000ms
- lambda: |-
int loop_delta = id(loop_counter)->get_loop_count() - id(loop_at_start);
int sched_delta = id(sched_count) - id(sched_at_start);
ESP_LOGI("test", "MEASUREMENT_DONE loop_delta=%d sched_delta=%d",
loop_delta, sched_delta);
host:
api:
logger:
level: INFO
logs:
loop_test_component: WARN # Silence per-loop log spam
external_components:
- source:
type: local
path: EXTERNAL_COMPONENT_PATH
globals:
- id: sched_count
type: int
initial_value: "0"
- id: loop_at_start
type: int
initial_value: "0"
- id: sched_at_start
type: int
initial_value: "0"
loop_test_component:
components:
- id: loop_counter
name: loop_counter
interval:
# Fast scheduler interval — with the decoupling fix this should fire at
# its requested 50ms cadence regardless of loop_interval_.
- interval: 50ms
then:
- lambda: |-
id(sched_count) += 1;
@@ -0,0 +1,51 @@
esphome:
name: loop-default-not-pulled
on_boot:
priority: -100
then:
# Leave loop_interval_ at its default (16 ms → ~62 Hz). Do NOT call
# set_loop_interval here. The fast scheduler interval below used to
# pull the component phase forward to ~128 Hz via the old
# std::max(next_schedule, delay_time / 2) floor.
# Start measurement after 1s so boot transients settle.
- delay: 1000ms
- lambda: |-
id(loop_at_start) = id(loop_counter)->get_loop_count();
ESP_LOGI("test", "MEASUREMENT_STARTED loop=%d", id(loop_at_start));
# Observe for 2s.
- delay: 2000ms
- lambda: |-
int loop_delta = id(loop_counter)->get_loop_count() - id(loop_at_start);
ESP_LOGI("test", "MEASUREMENT_DONE loop_delta=%d", loop_delta);
host:
api:
logger:
level: INFO
logs:
loop_test_component: WARN # Silence per-loop log spam
external_components:
- source:
type: local
path: EXTERNAL_COMPONENT_PATH
globals:
- id: loop_at_start
type: int
initial_value: "0"
loop_test_component:
components:
- id: loop_counter
name: loop_counter
interval:
# Fast scheduler interval (well under loop_interval_/2 = 8ms). In the
# pre-decoupling code this would have pulled the component phase forward
# to ~128 Hz. After the decoupling fix the component phase stays at
# ~62 Hz regardless.
- interval: 5ms
then:
- lambda: |-
// No-op; the presence of a due scheduler item is what matters.
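Worked numbers for the pre-fix pull this config targets: with loop_interval_ at its 16 ms default and a 5 ms scheduler item due, the old sleep computation gave max(next_schedule = 5, delay_time / 2 = 8) = 8 ms, then min(8, 16) = 8 ms of sleep per iteration, i.e. roughly 125-128 component phases per second instead of the documented ~62 Hz.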
@@ -0,0 +1,52 @@
esphome:
name: wake-loop-phase-b
on_boot:
priority: -100
then:
- lambda: |-
// Raise loop_interval_ to 2000ms. Without the wake-request flag,
// a wake_loop_threadsafe() call would only run Phase A (scheduler)
// and leave the component phase gated for ~2s.
App.set_loop_interval(2000);
# Let boot transients settle.
- delay: 1000ms
- lambda: |-
// Snapshot the loop counter, then ask the component to spawn a
// background thread that calls App.wake_loop_threadsafe() after
// ~50ms. With the fix, that wake forces Phase B on the next tick
// and the counter increments well within the 500ms observation
// window below.
id(count_at_start) = id(wake_counter)->get_loop_count();
id(start_time) = millis();
id(wake_counter)->start_async_wake();
ESP_LOGI("test", "WAKE_STARTED count=%d", id(count_at_start));
# Observation window must be much shorter than loop_interval_ (2000ms)
# so a "false pass" isn't possible by simply waiting out the gate.
- delay: 500ms
- lambda: |-
int count_now = id(wake_counter)->get_loop_count();
int delta = count_now - id(count_at_start);
uint32_t elapsed = millis() - id(start_time);
ESP_LOGI("test", "WAKE_RESULT delta=%d elapsed=%u", delta, elapsed);
host:
api:
logger:
level: INFO
external_components:
- source:
type: local
path: EXTERNAL_COMPONENT_PATH
components: [wake_test_component]
globals:
- id: count_at_start
type: int
initial_value: "0"
- id: start_time
type: uint32_t
initial_value: "0"
wake_test_component:
id: wake_counter
@@ -0,0 +1,75 @@
"""Test that loop_interval_ no longer clamps scheduler cadence.
Regression test for the decoupling of Application::loop() component-phase
cadence from scheduler wake timing.
Setup:
- App.set_loop_interval(500) — raised for power-savings style cadence
- Scheduler interval at 50ms — should fire at 50ms regardless of loop_interval_
- Component loop (LoopTestComponent) — should run at 500ms cadence
Before the decoupling fix the old `std::max(next_schedule, delay_time / 2)`
floor clamped the sleep to ~250ms, so the 50ms scheduler only fired ~8 times
per 2s (vs the ~40 expected). After the fix the scheduler fires close to its
requested cadence while the component phase stays gated at loop_interval_.
"""
from __future__ import annotations
import asyncio
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_loop_interval_decoupling(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Raised loop_interval_ must not clamp scheduler item cadence."""
loop = asyncio.get_running_loop()
measurement_done: asyncio.Future[tuple[int, int]] = loop.create_future()
def on_log_line(line: str) -> None:
match = re.search(r"MEASUREMENT_DONE loop_delta=(\d+) sched_delta=(\d+)", line)
if match and not measurement_done.done():
measurement_done.set_result((int(match.group(1)), int(match.group(2))))
async with (
run_compiled(yaml_config, line_callback=on_log_line),
api_client_connected() as client,
):
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "loop-interval-decouple"
try:
loop_delta, sched_delta = await asyncio.wait_for(
measurement_done, timeout=10.0
)
except TimeoutError:
pytest.fail("MEASUREMENT_DONE marker never appeared")
# Observation window = 2s, loop_interval_ = 500ms.
# Component phase should fire ~4 times in 2s. The upper bound must be
# less than 8: the pre-decoupling behavior clamped to ~250ms cadence
# giving ~8 loops/2s, so allowing 8 would let the old behavior pass.
# Lower bound 3 (not 2) keeps the test honest: a >30% slowdown from
# the ~4 nominal is not normal CI jitter and should fail.
assert 3 <= loop_delta <= 6, (
f"Component loop should fire ~4 times in 2s at loop_interval=500ms, "
f"got {loop_delta}"
)
# Scheduler interval = 50ms → ~40 fires in 2s. Before the decoupling
# fix this clamped to ~8 fires. Assert >= 20 to catch the old clamped
# behavior with comfortable jitter headroom for slow CI hosts.
assert sched_delta >= 20, (
f"50ms scheduler interval should fire ~40 times in 2s but only "
f"fired {sched_delta}. This indicates loop_interval_ is still "
f"clamping scheduler cadence."
)
@@ -0,0 +1,67 @@
"""Test that a fast scheduler item does not pull the component phase forward.
Regression test for the original ~128 Hz → ~62 Hz bug fixed by decoupling
Application::loop() component-phase cadence from scheduler wake timing.
Setup:
- loop_interval_ left at its default (16 ms → ~62 Hz component phase).
- Scheduler interval at 5 ms (well under the old loop_interval_/2 = 8 ms floor).
Before the decoupling fix the ``std::max(next_schedule, delay_time / 2)`` floor
clamped the sleep to ~8 ms whenever any scheduler item was due sooner than
loop_interval_/2. That pulled the component phase forward to ~128 Hz — twice
what the documented ~62 Hz default promised. After the fix the component
phase stays at ~62 Hz regardless of scheduler activity.
"""
from __future__ import annotations
import asyncio
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_loop_interval_default_not_pulled_forward(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""Fast scheduler item must not pull component phase past default ~62 Hz."""
loop = asyncio.get_running_loop()
measurement_done: asyncio.Future[int] = loop.create_future()
def on_log_line(line: str) -> None:
match = re.search(r"MEASUREMENT_DONE loop_delta=(\d+)", line)
if match and not measurement_done.done():
measurement_done.set_result(int(match.group(1)))
async with (
run_compiled(yaml_config, line_callback=on_log_line),
api_client_connected() as client,
):
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "loop-default-not-pulled"
try:
loop_delta = await asyncio.wait_for(measurement_done, timeout=10.0)
except TimeoutError:
pytest.fail("MEASUREMENT_DONE marker never appeared")
# Observation window = 2s, loop_interval_ default = 16ms → ~62 Hz →
# ~125 component-phase iterations expected.
# Pre-fix behavior: the 5 ms scheduler interval tripped the old
# delay_time/2 = 8 ms floor, pulling the phase to ~128 Hz → ~256.
# Upper bound 180 is comfortably below the ~256 pre-fix rate but
# above the ~125 nominal with CI jitter.
# Lower bound 80 covers very slow CI hosts without permitting a
# complete regression.
assert 80 <= loop_delta <= 180, (
f"Component loop at default loop_interval_ should fire ~125 times "
f"in 2s (≈62 Hz × 2s); got {loop_delta}. Values >200 indicate the "
f"scheduler is again pulling the component phase forward."
)
@@ -0,0 +1,76 @@
"""Test that wake_loop_threadsafe() forces a component-phase iteration.
Regression test for the wake-request flag added to Application::loop()'s
Phase A / Phase B gate. Background producers (MQTT RX, USB RX, BLE event,
etc.) call App.wake_loop_threadsafe() expecting their component's loop()
to drain queued work; if the component phase stays gated by loop_interval_,
the work waits up to loop_interval_ ms instead of running on the next tick.
Setup:
- App.set_loop_interval(2000) — a wide gate that would clearly mask the bug.
- A test component spawns a detached std::thread that sleeps 50 ms and then
calls App.wake_loop_threadsafe() from a non-main thread.
- The on_boot block snapshots the component's loop counter before/after a
500 ms observation window.
Without the fix, delta=0 (the gate holds Phase B for ~2 s).
With the fix, delta>=1 (the wake forces Phase B within one tick of the wake).
"""
from __future__ import annotations
import asyncio
from pathlib import Path
import re
import pytest
from .types import APIClientConnectedFactory, RunCompiledFunction
@pytest.mark.asyncio
async def test_wake_loop_forces_phase_b(
yaml_config: str,
run_compiled: RunCompiledFunction,
api_client_connected: APIClientConnectedFactory,
) -> None:
"""A wake_loop_threadsafe() call from a background thread must trigger the
component phase within the next tick, even when loop_interval_ is raised
well above the observation window."""
external_components_path = str(
Path(__file__).parent / "fixtures" / "external_components"
)
yaml_config = yaml_config.replace(
"EXTERNAL_COMPONENT_PATH", external_components_path
)
loop = asyncio.get_running_loop()
result: asyncio.Future[tuple[int, int]] = loop.create_future()
def on_log_line(line: str) -> None:
match = re.search(r"WAKE_RESULT delta=(\d+) elapsed=(\d+)", line)
if match and not result.done():
result.set_result((int(match.group(1)), int(match.group(2))))
async with (
run_compiled(yaml_config, line_callback=on_log_line),
api_client_connected() as client,
):
device_info = await client.device_info()
assert device_info is not None
assert device_info.name == "wake-loop-phase-b"
try:
delta, elapsed = await asyncio.wait_for(result, timeout=15.0)
except TimeoutError:
pytest.fail("WAKE_RESULT marker never appeared")
# Without the fix, delta would be 0 — loop_interval_=2000ms held
# Phase B off for the full 500ms observation window. With the fix
# the wake from the background thread (~50ms after start) forces
# Phase B on the next tick, so the counter increments at least once.
assert delta >= 1, (
f"wake_loop_threadsafe() from a background thread should force "
f"Phase B within the next tick; observed delta={delta} after "
f"{elapsed}ms with loop_interval_=2000ms"
)