[ci] Add automated memory impact analysis for pull requests (#11242)

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: pre-commit-ci-lite[bot] <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com>
Author: J. Nick Koston
Date: 2025-10-19 08:43:38 -10:00
Committed by: GitHub
Parent: f25af18655
Commit: a0922bc8b0
24 changed files with 3772 additions and 49 deletions


@@ -1,4 +1,5 @@
[run]
omit =
esphome/components/*
esphome/analyze_memory/*
tests/integration/*


@@ -175,6 +175,7 @@ jobs:
changed-components-with-tests: ${{ steps.determine.outputs.changed-components-with-tests }}
directly-changed-components-with-tests: ${{ steps.determine.outputs.directly-changed-components-with-tests }}
component-test-count: ${{ steps.determine.outputs.component-test-count }}
memory_impact: ${{ steps.determine.outputs.memory-impact }}
steps:
- name: Check out code from GitHub
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
@@ -204,6 +205,7 @@ jobs:
echo "changed-components-with-tests=$(echo "$output" | jq -c '.changed_components_with_tests')" >> $GITHUB_OUTPUT
echo "directly-changed-components-with-tests=$(echo "$output" | jq -c '.directly_changed_components_with_tests')" >> $GITHUB_OUTPUT
echo "component-test-count=$(echo "$output" | jq -r '.component_test_count')" >> $GITHUB_OUTPUT
echo "memory-impact=$(echo "$output" | jq -c '.memory_impact')" >> $GITHUB_OUTPUT
integration-tests:
name: Run integration tests
@@ -521,6 +523,292 @@ jobs:
- uses: pre-commit-ci/lite-action@5d6cc0eb514c891a40562a58a8e71576c5c7fb43 # v1.1.0
if: always()
memory-impact-target-branch:
name: Build target branch for memory impact
runs-on: ubuntu-24.04
needs:
- common
- determine-jobs
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.memory_impact).should_run == 'true'
outputs:
ram_usage: ${{ steps.extract.outputs.ram_usage }}
flash_usage: ${{ steps.extract.outputs.flash_usage }}
cache_hit: ${{ steps.cache-memory-analysis.outputs.cache-hit }}
skip: ${{ steps.check-script.outputs.skip }}
steps:
- name: Check out target branch
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
ref: ${{ github.base_ref }}
# Check if memory impact extraction script exists on target branch
# If not, skip the analysis (this handles older branches that don't have the feature)
- name: Check for memory impact script
id: check-script
run: |
if [ -f "script/ci_memory_impact_extract.py" ]; then
echo "skip=false" >> $GITHUB_OUTPUT
else
echo "skip=true" >> $GITHUB_OUTPUT
echo "::warning::ci_memory_impact_extract.py not found on target branch, skipping memory impact analysis"
fi
# All remaining steps only run if script exists
- name: Generate cache key
id: cache-key
if: steps.check-script.outputs.skip != 'true'
run: |
# Get the commit SHA of the target branch
target_sha=$(git rev-parse HEAD)
# Hash the build infrastructure files (all files that affect build/analysis)
infra_hash=$(cat \
script/test_build_components.py \
script/ci_memory_impact_extract.py \
script/analyze_component_buses.py \
script/merge_component_configs.py \
script/ci_helpers.py \
.github/workflows/ci.yml \
| sha256sum | cut -d' ' -f1)
# Get platform and components from job inputs
platform="${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}"
components='${{ toJSON(fromJSON(needs.determine-jobs.outputs.memory_impact).components) }}'
components_hash=$(echo "$components" | sha256sum | cut -d' ' -f1)
# Combine into cache key
cache_key="memory-analysis-target-${target_sha}-${infra_hash}-${platform}-${components_hash}"
echo "cache-key=${cache_key}" >> $GITHUB_OUTPUT
echo "Cache key: ${cache_key}"
- name: Restore cached memory analysis
id: cache-memory-analysis
if: steps.check-script.outputs.skip != 'true'
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: memory-analysis-target.json
key: ${{ steps.cache-key.outputs.cache-key }}
- name: Cache status
if: steps.check-script.outputs.skip != 'true'
run: |
if [ "${{ steps.cache-memory-analysis.outputs.cache-hit }}" == "true" ]; then
echo "✓ Cache hit! Using cached memory analysis results."
echo " Skipping build step to save time."
else
echo "✗ Cache miss. Will build and analyze memory usage."
fi
- name: Restore Python
if: steps.check-script.outputs.skip != 'true' && steps.cache-memory-analysis.outputs.cache-hit != 'true'
uses: ./.github/actions/restore-python
with:
python-version: ${{ env.DEFAULT_PYTHON }}
cache-key: ${{ needs.common.outputs.cache-key }}
- name: Cache platformio
if: steps.check-script.outputs.skip != 'true' && steps.cache-memory-analysis.outputs.cache-hit != 'true'
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ~/.platformio
key: platformio-memory-${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}-${{ hashFiles('platformio.ini') }}
- name: Build, compile, and analyze memory
if: steps.check-script.outputs.skip != 'true' && steps.cache-memory-analysis.outputs.cache-hit != 'true'
id: build
run: |
. venv/bin/activate
components='${{ toJSON(fromJSON(needs.determine-jobs.outputs.memory_impact).components) }}'
platform="${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}"
echo "Building with test_build_components.py for $platform with components:"
echo "$components" | jq -r '.[]' | sed 's/^/ - /'
# Use test_build_components.py which handles grouping automatically
# Pass components as comma-separated list
component_list=$(echo "$components" | jq -r 'join(",")')
echo "Compiling with test_build_components.py..."
# Run build and extract memory with auto-detection of build directory for detailed analysis
# Use tee to show output in CI while also piping to extraction script
python script/test_build_components.py \
-e compile \
-c "$component_list" \
-t "$platform" 2>&1 | \
tee /dev/stderr | \
python script/ci_memory_impact_extract.py \
--output-env \
--output-json memory-analysis-target.json
- name: Save memory analysis to cache
if: steps.check-script.outputs.skip != 'true' && steps.cache-memory-analysis.outputs.cache-hit != 'true' && steps.build.outcome == 'success'
uses: actions/cache/save@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: memory-analysis-target.json
key: ${{ steps.cache-key.outputs.cache-key }}
- name: Extract memory usage for outputs
id: extract
if: steps.check-script.outputs.skip != 'true'
run: |
if [ -f memory-analysis-target.json ]; then
ram=$(jq -r '.ram_bytes' memory-analysis-target.json)
flash=$(jq -r '.flash_bytes' memory-analysis-target.json)
echo "ram_usage=${ram}" >> $GITHUB_OUTPUT
echo "flash_usage=${flash}" >> $GITHUB_OUTPUT
echo "RAM: ${ram} bytes, Flash: ${flash} bytes"
else
echo "Error: memory-analysis-target.json not found"
exit 1
fi
- name: Upload memory analysis JSON
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: memory-analysis-target
path: memory-analysis-target.json
if-no-files-found: warn
retention-days: 1
memory-impact-pr-branch:
name: Build PR branch for memory impact
runs-on: ubuntu-24.04
needs:
- common
- determine-jobs
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.memory_impact).should_run == 'true'
outputs:
ram_usage: ${{ steps.extract.outputs.ram_usage }}
flash_usage: ${{ steps.extract.outputs.flash_usage }}
steps:
- name: Check out PR branch
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Restore Python
uses: ./.github/actions/restore-python
with:
python-version: ${{ env.DEFAULT_PYTHON }}
cache-key: ${{ needs.common.outputs.cache-key }}
- name: Cache platformio
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ~/.platformio
key: platformio-memory-${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}-${{ hashFiles('platformio.ini') }}
- name: Build, compile, and analyze memory
id: extract
run: |
. venv/bin/activate
components='${{ toJSON(fromJSON(needs.determine-jobs.outputs.memory_impact).components) }}'
platform="${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}"
echo "Building with test_build_components.py for $platform with components:"
echo "$components" | jq -r '.[]' | sed 's/^/ - /'
# Use test_build_components.py which handles grouping automatically
# Pass components as comma-separated list
component_list=$(echo "$components" | jq -r 'join(",")')
echo "Compiling with test_build_components.py..."
# Run build and extract memory with auto-detection of build directory for detailed analysis
# Use tee to show output in CI while also piping to extraction script
python script/test_build_components.py \
-e compile \
-c "$component_list" \
-t "$platform" 2>&1 | \
tee /dev/stderr | \
python script/ci_memory_impact_extract.py \
--output-env \
--output-json memory-analysis-pr.json
- name: Upload memory analysis JSON
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: memory-analysis-pr
path: memory-analysis-pr.json
if-no-files-found: warn
retention-days: 1
memory-impact-comment:
name: Comment memory impact
runs-on: ubuntu-24.04
needs:
- common
- determine-jobs
- memory-impact-target-branch
- memory-impact-pr-branch
if: github.event_name == 'pull_request' && fromJSON(needs.determine-jobs.outputs.memory_impact).should_run == 'true' && needs.memory-impact-target-branch.outputs.skip != 'true'
permissions:
contents: read
pull-requests: write
steps:
- name: Check out code
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Restore Python
uses: ./.github/actions/restore-python
with:
python-version: ${{ env.DEFAULT_PYTHON }}
cache-key: ${{ needs.common.outputs.cache-key }}
- name: Download target analysis JSON
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: memory-analysis-target
path: ./memory-analysis
continue-on-error: true
- name: Download PR analysis JSON
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
name: memory-analysis-pr
path: ./memory-analysis
continue-on-error: true
- name: Post or update PR comment
env:
GH_TOKEN: ${{ github.token }}
COMPONENTS: ${{ toJSON(fromJSON(needs.determine-jobs.outputs.memory_impact).components) }}
PLATFORM: ${{ fromJSON(needs.determine-jobs.outputs.memory_impact).platform }}
TARGET_RAM: ${{ needs.memory-impact-target-branch.outputs.ram_usage }}
TARGET_FLASH: ${{ needs.memory-impact-target-branch.outputs.flash_usage }}
PR_RAM: ${{ needs.memory-impact-pr-branch.outputs.ram_usage }}
PR_FLASH: ${{ needs.memory-impact-pr-branch.outputs.flash_usage }}
TARGET_CACHE_HIT: ${{ needs.memory-impact-target-branch.outputs.cache_hit }}
run: |
. venv/bin/activate
# Check if analysis JSON files exist
target_json_arg=""
pr_json_arg=""
if [ -f ./memory-analysis/memory-analysis-target.json ]; then
echo "Found target analysis JSON"
target_json_arg="--target-json ./memory-analysis/memory-analysis-target.json"
else
echo "No target analysis JSON found"
fi
if [ -f ./memory-analysis/memory-analysis-pr.json ]; then
echo "Found PR analysis JSON"
pr_json_arg="--pr-json ./memory-analysis/memory-analysis-pr.json"
else
echo "No PR analysis JSON found"
fi
# Add cache flag if target was cached
cache_flag=""
if [ "$TARGET_CACHE_HIT" == "true" ]; then
cache_flag="--target-cache-hit"
fi
python script/ci_memory_impact_comment.py \
--pr-number "${{ github.event.pull_request.number }}" \
--components "$COMPONENTS" \
--platform "$PLATFORM" \
--target-ram "$TARGET_RAM" \
--target-flash "$TARGET_FLASH" \
--pr-ram "$PR_RAM" \
--pr-flash "$PR_FLASH" \
$target_json_arg \
$pr_json_arg \
$cache_flag
ci-status:
name: CI Status
runs-on: ubuntu-24.04
@@ -535,6 +823,9 @@ jobs:
- test-build-components-splitter
- test-build-components-split
- pre-commit-ci-lite
- memory-impact-target-branch
- memory-impact-pr-branch
- memory-impact-comment
if: always()
steps:
- name: Success
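The memory-impact-comment job above delegates posting to script/ci_memory_impact_comment.py, whose diff is suppressed in this view. The template's {{ comment_marker }} variable suggests the usual upsert pattern: find the bot's previous comment by a hidden marker and edit it in place, otherwise create a new one. A rough sketch under that assumption (not the actual implementation; the marker value and the requests dependency are hypothetical):

import os
import requests  # hypothetical dependency; the real script may use another client

API = "https://api.github.com"
MARKER = "<!-- esphome-memory-impact -->"  # hypothetical marker value

def upsert_comment(repo: str, pr_number: int, body: str, token: str) -> None:
    """Edit the existing marker comment if present, otherwise create one."""
    headers = {"Authorization": f"Bearer {token}"}
    comments = requests.get(
        f"{API}/repos/{repo}/issues/{pr_number}/comments", headers=headers
    ).json()
    existing = next((c for c in comments if MARKER in c.get("body", "")), None)
    payload = {"body": f"{MARKER}\n{body}"}
    if existing:
        requests.patch(
            f"{API}/repos/{repo}/issues/comments/{existing['id']}",
            headers=headers,
            json=payload,
        )
    else:
        requests.post(
            f"{API}/repos/{repo}/issues/{pr_number}/comments",
            headers=headers,
            json=payload,
        )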


@@ -466,7 +466,9 @@ def write_cpp_file() -> int:
def compile_program(args: ArgsProtocol, config: ConfigType) -> int:
from esphome import platformio_api
_LOGGER.info("Compiling app...")
# NOTE: "Build path:" format is parsed by script/ci_memory_impact_extract.py
# If you change this format, update the regex in that script as well
_LOGGER.info("Compiling app... Build path: %s", CORE.build_path)
rc = platformio_api.run_compile(config, CORE.verbose)
if rc != 0:
return rc
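The note above pins down a small contract: the build-path log line emitted here must stay matchable by the regex in script/ci_memory_impact_extract.py. A minimal round-trip check, using the same pattern that appears later in this diff (the log line itself is a made-up example):

import re

# Same pattern as _BUILD_PATH_PATTERN in script/ci_memory_impact_extract.py
BUILD_PATH_PATTERN = re.compile(r"Build path: (.+)")

# Hypothetical log line in the format emitted above
line = "INFO Compiling app... Build path: /home/ci/.esphome/build/test-device"
match = BUILD_PATH_PATTERN.search(line)
assert match is not None and match.group(1) == "/home/ci/.esphome/build/test-device"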

File diff suppressed because it is too large.


@@ -0,0 +1,6 @@
"""Main entry point for running the memory analyzer as a module."""
from .cli import main
if __name__ == "__main__":
main()


@@ -0,0 +1,408 @@
"""CLI interface for memory analysis with report generation."""
from collections import defaultdict
import sys
from . import (
_COMPONENT_API,
_COMPONENT_CORE,
_COMPONENT_PREFIX_ESPHOME,
_COMPONENT_PREFIX_EXTERNAL,
MemoryAnalyzer,
)
class MemoryAnalyzerCLI(MemoryAnalyzer):
"""Memory analyzer with CLI-specific report generation."""
# Column width constants
COL_COMPONENT: int = 29
COL_FLASH_TEXT: int = 14
COL_FLASH_DATA: int = 14
COL_RAM_DATA: int = 12
COL_RAM_BSS: int = 12
COL_TOTAL_FLASH: int = 15
COL_TOTAL_RAM: int = 12
COL_SEPARATOR: int = 3 # " | "
# Core analysis column widths
COL_CORE_SUBCATEGORY: int = 30
COL_CORE_SIZE: int = 12
COL_CORE_COUNT: int = 6
COL_CORE_PERCENT: int = 10
# Calculate table width once at class level
TABLE_WIDTH: int = (
COL_COMPONENT
+ COL_SEPARATOR
+ COL_FLASH_TEXT
+ COL_SEPARATOR
+ COL_FLASH_DATA
+ COL_SEPARATOR
+ COL_RAM_DATA
+ COL_SEPARATOR
+ COL_RAM_BSS
+ COL_SEPARATOR
+ COL_TOTAL_FLASH
+ COL_SEPARATOR
+ COL_TOTAL_RAM
)
@staticmethod
def _make_separator_line(*widths: int) -> str:
"""Create a separator line with given column widths.
Args:
widths: Column widths to create separators for
Returns:
Separator line like "----+---------+-----"
"""
return "-+-".join("-" * width for width in widths)
# Pre-computed separator lines
MAIN_TABLE_SEPARATOR: str = _make_separator_line(
COL_COMPONENT,
COL_FLASH_TEXT,
COL_FLASH_DATA,
COL_RAM_DATA,
COL_RAM_BSS,
COL_TOTAL_FLASH,
COL_TOTAL_RAM,
)
CORE_TABLE_SEPARATOR: str = _make_separator_line(
COL_CORE_SUBCATEGORY,
COL_CORE_SIZE,
COL_CORE_COUNT,
COL_CORE_PERCENT,
)
def generate_report(self, detailed: bool = False) -> str:
"""Generate a formatted memory report."""
components = sorted(
self.components.items(), key=lambda x: x[1].flash_total, reverse=True
)
# Calculate totals
total_flash = sum(c.flash_total for _, c in components)
total_ram = sum(c.ram_total for _, c in components)
# Build report
lines: list[str] = []
lines.append("=" * self.TABLE_WIDTH)
lines.append("Component Memory Analysis".center(self.TABLE_WIDTH))
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
# Main table - fixed column widths
lines.append(
f"{'Component':<{self.COL_COMPONENT}} | {'Flash (text)':>{self.COL_FLASH_TEXT}} | {'Flash (data)':>{self.COL_FLASH_DATA}} | {'RAM (data)':>{self.COL_RAM_DATA}} | {'RAM (bss)':>{self.COL_RAM_BSS}} | {'Total Flash':>{self.COL_TOTAL_FLASH}} | {'Total RAM':>{self.COL_TOTAL_RAM}}"
)
lines.append(self.MAIN_TABLE_SEPARATOR)
for name, mem in components:
if mem.flash_total > 0 or mem.ram_total > 0:
flash_rodata = mem.rodata_size + mem.data_size
lines.append(
f"{name:<{self.COL_COMPONENT}} | {mem.text_size:>{self.COL_FLASH_TEXT - 2},} B | {flash_rodata:>{self.COL_FLASH_DATA - 2},} B | "
f"{mem.data_size:>{self.COL_RAM_DATA - 2},} B | {mem.bss_size:>{self.COL_RAM_BSS - 2},} B | "
f"{mem.flash_total:>{self.COL_TOTAL_FLASH - 2},} B | {mem.ram_total:>{self.COL_TOTAL_RAM - 2},} B"
)
lines.append(self.MAIN_TABLE_SEPARATOR)
lines.append(
f"{'TOTAL':<{self.COL_COMPONENT}} | {' ':>{self.COL_FLASH_TEXT}} | {' ':>{self.COL_FLASH_DATA}} | "
f"{' ':>{self.COL_RAM_DATA}} | {' ':>{self.COL_RAM_BSS}} | "
f"{total_flash:>{self.COL_TOTAL_FLASH - 2},} B | {total_ram:>{self.COL_TOTAL_RAM - 2},} B"
)
# Top consumers
lines.append("")
lines.append("Top Flash Consumers:")
for i, (name, mem) in enumerate(components[:25]):
if mem.flash_total > 0:
percentage = (
(mem.flash_total / total_flash * 100) if total_flash > 0 else 0
)
lines.append(
f"{i + 1}. {name} ({mem.flash_total:,} B) - {percentage:.1f}% of analyzed flash"
)
lines.append("")
lines.append("Top RAM Consumers:")
ram_components = sorted(components, key=lambda x: x[1].ram_total, reverse=True)
for i, (name, mem) in enumerate(ram_components[:25]):
if mem.ram_total > 0:
percentage = (mem.ram_total / total_ram * 100) if total_ram > 0 else 0
lines.append(
f"{i + 1}. {name} ({mem.ram_total:,} B) - {percentage:.1f}% of analyzed RAM"
)
lines.append("")
lines.append(
"Note: This analysis covers symbols in the ELF file. Some runtime allocations may not be included."
)
lines.append("=" * self.TABLE_WIDTH)
# Add ESPHome core detailed analysis if there are core symbols
if self._esphome_core_symbols:
lines.append("")
lines.append("=" * self.TABLE_WIDTH)
lines.append(
f"{_COMPONENT_CORE} Detailed Analysis".center(self.TABLE_WIDTH)
)
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
# Group core symbols by subcategory
core_subcategories: dict[str, list[tuple[str, str, int]]] = defaultdict(
list
)
for symbol, demangled, size in self._esphome_core_symbols:
# Categorize based on demangled name patterns
subcategory = self._categorize_esphome_core_symbol(demangled)
core_subcategories[subcategory].append((symbol, demangled, size))
# Sort subcategories by total size
sorted_subcategories = sorted(
[
(name, symbols, sum(s[2] for s in symbols))
for name, symbols in core_subcategories.items()
],
key=lambda x: x[2],
reverse=True,
)
lines.append(
f"{'Subcategory':<{self.COL_CORE_SUBCATEGORY}} | {'Size':>{self.COL_CORE_SIZE}} | "
f"{'Count':>{self.COL_CORE_COUNT}} | {'% of Core':>{self.COL_CORE_PERCENT}}"
)
lines.append(self.CORE_TABLE_SEPARATOR)
core_total = sum(size for _, _, size in self._esphome_core_symbols)
for subcategory, symbols, total_size in sorted_subcategories:
percentage = (total_size / core_total * 100) if core_total > 0 else 0
lines.append(
f"{subcategory:<{self.COL_CORE_SUBCATEGORY}} | {total_size:>{self.COL_CORE_SIZE - 2},} B | "
f"{len(symbols):>{self.COL_CORE_COUNT}} | {percentage:>{self.COL_CORE_PERCENT - 1}.1f}%"
)
# Top 15 largest core symbols
lines.append("")
lines.append(f"Top 15 Largest {_COMPONENT_CORE} Symbols:")
sorted_core_symbols = sorted(
self._esphome_core_symbols, key=lambda x: x[2], reverse=True
)
for i, (symbol, demangled, size) in enumerate(sorted_core_symbols[:15]):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")
lines.append("=" * self.TABLE_WIDTH)
# Add detailed analysis for top ESPHome and external components
esphome_components = [
(name, mem)
for name, mem in components
if name.startswith(_COMPONENT_PREFIX_ESPHOME) and name != _COMPONENT_CORE
]
external_components = [
(name, mem)
for name, mem in components
if name.startswith(_COMPONENT_PREFIX_EXTERNAL)
]
top_esphome_components = sorted(
esphome_components, key=lambda x: x[1].flash_total, reverse=True
)[:30]
# Include all external components (they're usually important)
top_external_components = sorted(
external_components, key=lambda x: x[1].flash_total, reverse=True
)
# Check if API component exists and ensure it's included
api_component = None
for name, mem in components:
if name == _COMPONENT_API:
api_component = (name, mem)
break
# Combine all components to analyze: top ESPHome + all external + API if not already included
components_to_analyze = list(top_esphome_components) + list(
top_external_components
)
if api_component and api_component not in components_to_analyze:
components_to_analyze.append(api_component)
if components_to_analyze:
for comp_name, comp_mem in components_to_analyze:
if not (comp_symbols := self._component_symbols.get(comp_name, [])):
continue
lines.append("")
lines.append("=" * self.TABLE_WIDTH)
lines.append(f"{comp_name} Detailed Analysis".center(self.TABLE_WIDTH))
lines.append("=" * self.TABLE_WIDTH)
lines.append("")
# Sort symbols by size
sorted_symbols = sorted(comp_symbols, key=lambda x: x[2], reverse=True)
lines.append(f"Total symbols: {len(sorted_symbols)}")
lines.append(f"Total size: {comp_mem.flash_total:,} B")
lines.append("")
# Show all symbols > 100 bytes for better visibility
large_symbols = [
(sym, dem, size) for sym, dem, size in sorted_symbols if size > 100
]
lines.append(
f"{comp_name} Symbols > 100 B ({len(large_symbols)} symbols):"
)
for i, (symbol, demangled, size) in enumerate(large_symbols):
lines.append(f"{i + 1}. {demangled} ({size:,} B)")
lines.append("=" * self.TABLE_WIDTH)
return "\n".join(lines)
def dump_uncategorized_symbols(self, output_file: str | None = None) -> None:
"""Dump uncategorized symbols for analysis."""
# Sort by size descending
sorted_symbols = sorted(
self._uncategorized_symbols, key=lambda x: x[2], reverse=True
)
lines = ["Uncategorized Symbols Analysis", "=" * 80]
lines.append(f"Total uncategorized symbols: {len(sorted_symbols)}")
lines.append(
f"Total uncategorized size: {sum(s[2] for s in sorted_symbols):,} bytes"
)
lines.append("")
lines.append(f"{'Size':>10} | {'Symbol':<60} | Demangled")
lines.append("-" * 10 + "-+-" + "-" * 60 + "-+-" + "-" * 40)
for symbol, demangled, size in sorted_symbols[:100]: # Top 100
demangled_display = (
demangled[:100] if symbol != demangled else "[not demangled]"
)
lines.append(f"{size:>10,} | {symbol[:60]:<60} | {demangled_display}")
if len(sorted_symbols) > 100:
lines.append(f"\n... and {len(sorted_symbols) - 100} more symbols")
content = "\n".join(lines)
if output_file:
with open(output_file, "w", encoding="utf-8") as f:
f.write(content)
else:
print(content)
def analyze_elf(
elf_path: str,
objdump_path: str | None = None,
readelf_path: str | None = None,
detailed: bool = False,
external_components: set[str] | None = None,
) -> str:
"""Analyze an ELF file and return a memory report."""
analyzer = MemoryAnalyzerCLI(
elf_path, objdump_path, readelf_path, external_components
)
analyzer.analyze()
return analyzer.generate_report(detailed)
def main():
"""CLI entrypoint for memory analysis."""
if len(sys.argv) < 2:
print("Usage: python -m esphome.analyze_memory <build_directory>")
print("\nAnalyze memory usage from an ESPHome build directory.")
print("The build directory should contain firmware.elf and idedata will be")
print("loaded from ~/.esphome/.internal/idedata/<device>.json")
print("\nExamples:")
print(" python -m esphome.analyze_memory ~/.esphome/build/my-device")
print(" python -m esphome.analyze_memory .esphome/build/my-device")
print(" python -m esphome.analyze_memory my-device # Short form")
sys.exit(1)
build_dir = sys.argv[1]
# Load build directory
import json
from pathlib import Path
from esphome.platformio_api import IDEData
build_path = Path(build_dir)
# If no path separator in name, assume it's a device name
if "/" not in build_dir and not build_path.is_dir():
# Try current directory first
cwd_path = Path.cwd() / ".esphome" / "build" / build_dir
if cwd_path.is_dir():
build_path = cwd_path
print(f"Using build directory: {build_path}", file=sys.stderr)
else:
# Fall back to home directory
build_path = Path.home() / ".esphome" / "build" / build_dir
print(f"Using build directory: {build_path}", file=sys.stderr)
if not build_path.is_dir():
print(f"Error: {build_path} is not a directory", file=sys.stderr)
sys.exit(1)
# Find firmware.elf
elf_file = None
for elf_candidate in [
build_path / "firmware.elf",
build_path / ".pioenvs" / build_path.name / "firmware.elf",
]:
if elf_candidate.exists():
elf_file = str(elf_candidate)
break
if not elf_file:
print(f"Error: firmware.elf not found in {build_dir}", file=sys.stderr)
sys.exit(1)
# Find idedata.json - check current directory first, then home
device_name = build_path.name
idedata_candidates = [
Path.cwd() / ".esphome" / "idedata" / f"{device_name}.json",
Path.home() / ".esphome" / "idedata" / f"{device_name}.json",
]
idedata = None
for idedata_path in idedata_candidates:
if not idedata_path.exists():
continue
try:
with open(idedata_path, encoding="utf-8") as f:
raw_data = json.load(f)
idedata = IDEData(raw_data)
print(f"Loaded idedata from: {idedata_path}", file=sys.stderr)
break
except (json.JSONDecodeError, OSError) as e:
print(f"Warning: Failed to load idedata: {e}", file=sys.stderr)
if not idedata:
print(
f"Warning: idedata not found (searched {idedata_candidates[0]} and {idedata_candidates[1]})",
file=sys.stderr,
)
analyzer = MemoryAnalyzerCLI(elf_file, idedata=idedata)
analyzer.analyze()
report = analyzer.generate_report()
print(report)
if __name__ == "__main__":
main()

File diff suppressed because it is too large.


@@ -0,0 +1,121 @@
"""Helper functions for memory analysis."""
from functools import cache
from pathlib import Path
from .const import SECTION_MAPPING
# This namespace constant also exists in the parent module, but importing it
# here would create a circular import, so it is redefined locally
_NAMESPACE_ESPHOME = "esphome::"
# Get the list of actual ESPHome components by scanning the components directory
@cache
def get_esphome_components():
"""Get set of actual ESPHome components from the components directory."""
# Find the components directory relative to this file
# Go up two levels from analyze_memory/helpers.py to esphome/
current_dir = Path(__file__).parent.parent
components_dir = current_dir / "components"
if not components_dir.exists() or not components_dir.is_dir():
return frozenset()
return frozenset(
item.name
for item in components_dir.iterdir()
if item.is_dir()
and not item.name.startswith(".")
and not item.name.startswith("__")
)
@cache
def get_component_class_patterns(component_name: str) -> list[str]:
"""Generate component class name patterns for symbol matching.
Args:
component_name: The component name (e.g., "ota", "wifi", "api")
Returns:
List of pattern strings to match against demangled symbols
"""
component_upper = component_name.upper()
component_camel = component_name.replace("_", "").title()
return [
f"{_NAMESPACE_ESPHOME}{component_upper}Component", # e.g., esphome::OTAComponent
f"{_NAMESPACE_ESPHOME}ESPHome{component_upper}Component", # e.g., esphome::ESPHomeOTAComponent
f"{_NAMESPACE_ESPHOME}{component_camel}Component", # e.g., esphome::OtaComponent
f"{_NAMESPACE_ESPHOME}ESPHome{component_camel}Component", # e.g., esphome::ESPHomeOtaComponent
]
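As a quick illustration of the patterns above (matching the examples in the function's own comments; the import path assumes this commit's new esphome/analyze_memory package):

from esphome.analyze_memory.helpers import get_component_class_patterns  # assumed path

assert get_component_class_patterns("ota") == [
    "esphome::OTAComponent",
    "esphome::ESPHomeOTAComponent",
    "esphome::OtaComponent",
    "esphome::ESPHomeOtaComponent",
]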
def map_section_name(raw_section: str) -> str | None:
"""Map raw section name to standard section.
Args:
raw_section: Raw section name from ELF file (e.g., ".iram0.text", ".rodata.str1.1")
Returns:
Standard section name (".text", ".rodata", ".data", ".bss") or None
"""
for standard_section, patterns in SECTION_MAPPING.items():
if any(pattern in raw_section for pattern in patterns):
return standard_section
return None
def parse_symbol_line(line: str) -> tuple[str, str, int, str] | None:
"""Parse a single symbol line from objdump output.
Args:
line: Line from objdump -t output
Returns:
Tuple of (section, name, size, address) or None if not a valid symbol.
Format: address l/g w/d F/O section size name
Example: 40084870 l F .iram0.text 00000000 _xt_user_exc
"""
parts = line.split()
if len(parts) < 5:
return None
try:
# Validate and extract address
address = parts[0]
int(address, 16)
except ValueError:
return None
# Look for F (function) or O (object) flag
if "F" not in parts and "O" not in parts:
return None
# Find section, size, and name
for i, part in enumerate(parts):
if not part.startswith("."):
continue
section = map_section_name(part)
if not section:
break
# Need at least size field after section
if i + 1 >= len(parts):
break
try:
size = int(parts[i + 1], 16)
except ValueError:
break
# Need symbol name and non-zero size
if i + 2 >= len(parts) or size == 0:
break
name = " ".join(parts[i + 2 :])
return (section, name, size, address)
return None
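A sketch of how a symbol line is parsed, under the assumption that SECTION_MAPPING (defined in const.py, not shown in this view) maps ".dram0.bss" to ".bss"; the objdump line is hypothetical:

from esphome.analyze_memory.helpers import parse_symbol_line  # assumed path

# Hypothetical objdump -t line: address, flags, section, size, name
line = "3ffb0000 g O .dram0.bss 00000410 esphome::global_api_server"
assert parse_symbol_line(line) == (
    ".bss", "esphome::global_api_server", 0x410, "3ffb0000"
)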


@@ -374,3 +374,23 @@ class IDEData:
return f"{self.cc_path[:-7]}addr2line.exe"
return f"{self.cc_path[:-3]}addr2line"
@property
def objdump_path(self) -> str:
# replace gcc at end with objdump
path = self.cc_path
return (
f"{path[:-7]}objdump.exe"
if path.endswith(".exe")
else f"{path[:-3]}objdump"
)
@property
def readelf_path(self) -> str:
# replace gcc at end with readelf
path = self.cc_path
return (
f"{path[:-7]}readelf.exe"
if path.endswith(".exe")
else f"{path[:-3]}readelf"
)


@@ -34,6 +34,8 @@ from typing import Any
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
from helpers import BASE_BUS_COMPONENTS
from esphome import yaml_util
from esphome.config_helpers import Extend, Remove
@@ -67,18 +69,6 @@ NO_BUSES_SIGNATURE = "no_buses"
# Isolated components have unique signatures and cannot be merged with others
ISOLATED_SIGNATURE_PREFIX = "isolated_"
# Base bus components - these ARE the bus implementations and should not
# be flagged as needing migration since they are the platform/base components
BASE_BUS_COMPONENTS = {
"i2c",
"spi",
"uart",
"modbus",
"canbus",
"remote_transmitter",
"remote_receiver",
}
# Components that must be tested in isolation (not grouped or batched with others)
# These have known build issues that prevent grouping
# NOTE: This should be kept in sync with both test_build_components and split_components_for_ci.py

script/ci_helpers.py (new executable file, 23 lines)

@@ -0,0 +1,23 @@
"""Common helper functions for CI scripts."""
from __future__ import annotations
import os
def write_github_output(outputs: dict[str, str | int]) -> None:
"""Write multiple outputs to GITHUB_OUTPUT or stdout.
When running in GitHub Actions, writes to the GITHUB_OUTPUT file.
When running locally, writes to stdout for debugging.
Args:
outputs: Dictionary of key-value pairs to write
"""
github_output = os.environ.get("GITHUB_OUTPUT")
if github_output:
with open(github_output, "a", encoding="utf-8") as f:
f.writelines(f"{key}={value}\n" for key, value in outputs.items())
else:
for key, value in outputs.items():
print(f"{key}={value}")

File diff suppressed because it is too large.


@@ -0,0 +1,281 @@
#!/usr/bin/env python3
"""Extract memory usage statistics from ESPHome build output.
This script parses the PlatformIO build output to extract RAM and flash
usage statistics for a compiled component. It's used by the CI workflow to
compare memory usage between branches.
The script reads compile output from stdin and looks for the standard
PlatformIO output format:
RAM: [==== ] 36.1% (used 29548 bytes from 81920 bytes)
Flash: [=== ] 34.0% (used 348511 bytes from 1023984 bytes)
Optionally performs detailed memory analysis if a build directory is provided.
"""
from __future__ import annotations
import argparse
import json
from pathlib import Path
import re
import sys
# Add esphome to path
sys.path.insert(0, str(Path(__file__).parent.parent))
# pylint: disable=wrong-import-position
from esphome.analyze_memory import MemoryAnalyzer
from esphome.platformio_api import IDEData
from script.ci_helpers import write_github_output
# Regex patterns for extracting memory usage from PlatformIO output
_RAM_PATTERN = re.compile(r"RAM:\s+\[.*?\]\s+\d+\.\d+%\s+\(used\s+(\d+)\s+bytes")
_FLASH_PATTERN = re.compile(r"Flash:\s+\[.*?\]\s+\d+\.\d+%\s+\(used\s+(\d+)\s+bytes")
_BUILD_PATH_PATTERN = re.compile(r"Build path: (.+)")
def extract_from_compile_output(
output_text: str,
) -> tuple[int | None, int | None, str | None]:
"""Extract memory usage and build directory from PlatformIO compile output.
Supports multiple builds (for component groups or isolated components).
When test_build_components.py creates multiple builds, this sums the
memory usage across all builds.
Looks for lines like:
RAM: [==== ] 36.1% (used 29548 bytes from 81920 bytes)
Flash: [=== ] 34.0% (used 348511 bytes from 1023984 bytes)
Also extracts build directory from lines like:
INFO Compiling app... Build path: /path/to/build
Args:
output_text: Compile output text (may contain multiple builds)
Returns:
Tuple of (total_ram_bytes, total_flash_bytes, build_dir) or (None, None, None) if not found
"""
# Find all RAM and Flash matches (may be multiple builds)
ram_matches = _RAM_PATTERN.findall(output_text)
flash_matches = _FLASH_PATTERN.findall(output_text)
if not ram_matches or not flash_matches:
return None, None, None
# Sum all builds (handles multiple component groups)
total_ram = sum(int(match) for match in ram_matches)
total_flash = sum(int(match) for match in flash_matches)
# Extract build directory from ESPHome's explicit build path output
# Look for: INFO Compiling app... Build path: /path/to/build
# Note: Multiple builds reuse the same build path (each overwrites the previous)
build_dir = None
if match := _BUILD_PATH_PATTERN.search(output_text):
build_dir = match.group(1).strip()
return total_ram, total_flash, build_dir
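A worked example of the summing behavior described in the docstring, using the docstring's sample numbers plus an invented second build:

from script.ci_memory_impact_extract import extract_from_compile_output  # assumed path

output = """\
INFO Compiling app... Build path: /tmp/build/grp1
RAM:   [====      ]  36.1% (used 29548 bytes from 81920 bytes)
Flash: [===       ]  34.0% (used 348511 bytes from 1023984 bytes)
INFO Compiling app... Build path: /tmp/build/grp1
RAM:   [====      ]  40.0% (used 32768 bytes from 81920 bytes)
Flash: [====      ]  40.0% (used 409600 bytes from 1023984 bytes)
"""
ram, flash, build_dir = extract_from_compile_output(output)
assert (ram, flash) == (29548 + 32768, 348511 + 409600)
assert build_dir == "/tmp/build/grp1"  # builds reuse one path; search() takes the first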
def run_detailed_analysis(build_dir: str) -> dict | None:
"""Run detailed memory analysis on build directory.
Args:
build_dir: Path to ESPHome build directory
Returns:
Dictionary with analysis results or None if analysis fails
"""
build_path = Path(build_dir)
if not build_path.exists():
print(f"Build directory not found: {build_dir}", file=sys.stderr)
return None
# Find firmware.elf
elf_path = None
for elf_candidate in [
build_path / "firmware.elf",
build_path / ".pioenvs" / build_path.name / "firmware.elf",
]:
if elf_candidate.exists():
elf_path = str(elf_candidate)
break
if not elf_path:
print(f"firmware.elf not found in {build_dir}", file=sys.stderr)
return None
# Find idedata.json - check multiple locations
device_name = build_path.name
idedata_candidates = [
# In .pioenvs for test builds
build_path / ".pioenvs" / device_name / "idedata.json",
# In .esphome/idedata for regular builds
Path.home() / ".esphome" / "idedata" / f"{device_name}.json",
# Check parent directories for .esphome/idedata (for test_build_components)
build_path.parent.parent.parent / "idedata" / f"{device_name}.json",
]
idedata = None
for idedata_path in idedata_candidates:
if not idedata_path.exists():
continue
try:
with open(idedata_path, encoding="utf-8") as f:
raw_data = json.load(f)
idedata = IDEData(raw_data)
print(f"Loaded idedata from: {idedata_path}", file=sys.stderr)
break
except (json.JSONDecodeError, OSError) as e:
print(
f"Warning: Failed to load idedata from {idedata_path}: {e}",
file=sys.stderr,
)
analyzer = MemoryAnalyzer(elf_path, idedata=idedata)
components = analyzer.analyze()
# Convert to JSON-serializable format
result = {
"components": {
name: {
"text": mem.text_size,
"rodata": mem.rodata_size,
"data": mem.data_size,
"bss": mem.bss_size,
"flash_total": mem.flash_total,
"ram_total": mem.ram_total,
"symbol_count": mem.symbol_count,
}
for name, mem in components.items()
},
"symbols": {},
}
# Build symbol map
for section in analyzer.sections.values():
for symbol_name, size, _ in section.symbols:
if size > 0:
demangled = analyzer._demangle_symbol(symbol_name)
result["symbols"][demangled] = size
return result
def main() -> int:
"""Main entry point."""
parser = argparse.ArgumentParser(
description="Extract memory usage from ESPHome build output"
)
parser.add_argument(
"--output-env",
action="store_true",
help="Output to GITHUB_OUTPUT environment file",
)
parser.add_argument(
"--build-dir",
help="Optional build directory for detailed memory analysis (overrides auto-detection)",
)
parser.add_argument(
"--output-json",
help="Optional path to save detailed analysis JSON",
)
parser.add_argument(
"--output-build-dir",
help="Optional path to write the detected build directory",
)
args = parser.parse_args()
# Read compile output from stdin
compile_output = sys.stdin.read()
# Extract memory usage and build directory
ram_bytes, flash_bytes, detected_build_dir = extract_from_compile_output(
compile_output
)
if ram_bytes is None or flash_bytes is None:
print("Failed to extract memory usage from compile output", file=sys.stderr)
print("Expected lines like:", file=sys.stderr)
print(
" RAM: [==== ] 36.1% (used 29548 bytes from 81920 bytes)",
file=sys.stderr,
)
print(
" Flash: [=== ] 34.0% (used 348511 bytes from 1023984 bytes)",
file=sys.stderr,
)
return 1
# Count how many builds were found
num_builds = len(_RAM_PATTERN.findall(compile_output))
if num_builds > 1:
print(
f"Found {num_builds} builds - summing memory usage across all builds",
file=sys.stderr,
)
print(
"WARNING: Detailed analysis will only cover the last build",
file=sys.stderr,
)
print(f"Total RAM: {ram_bytes} bytes", file=sys.stderr)
print(f"Total Flash: {flash_bytes} bytes", file=sys.stderr)
# Determine which build directory to use (explicit arg overrides auto-detection)
build_dir = args.build_dir or detected_build_dir
if detected_build_dir:
print(f"Detected build directory: {detected_build_dir}", file=sys.stderr)
if num_builds > 1:
print(
f" (using last of {num_builds} builds for detailed analysis)",
file=sys.stderr,
)
# Write build directory to file if requested
if args.output_build_dir and build_dir:
build_dir_path = Path(args.output_build_dir)
build_dir_path.parent.mkdir(parents=True, exist_ok=True)
build_dir_path.write_text(build_dir)
print(f"Wrote build directory to {args.output_build_dir}", file=sys.stderr)
# Run detailed analysis if build directory available
detailed_analysis = None
if build_dir:
print(f"Running detailed analysis on {build_dir}", file=sys.stderr)
detailed_analysis = run_detailed_analysis(build_dir)
# Save JSON output if requested
if args.output_json:
output_data = {
"ram_bytes": ram_bytes,
"flash_bytes": flash_bytes,
"detailed_analysis": detailed_analysis,
}
output_path = Path(args.output_json)
output_path.parent.mkdir(parents=True, exist_ok=True)
with open(output_path, "w", encoding="utf-8") as f:
json.dump(output_data, f, indent=2)
print(f"Saved analysis to {args.output_json}", file=sys.stderr)
if args.output_env:
# Output to GitHub Actions
write_github_output(
{
"ram_usage": ram_bytes,
"flash_usage": flash_bytes,
}
)
else:
print(f"{ram_bytes},{flash_bytes}")
return 0
if __name__ == "__main__":
sys.exit(main())


@@ -10,7 +10,13 @@ what files have changed. It outputs JSON with the following structure:
"clang_format": true/false,
"python_linters": true/false,
"changed_components": ["component1", "component2", ...],
"component_test_count": 5
"component_test_count": 5,
"memory_impact": {
"should_run": "true/false",
"components": ["component1", "component2", ...],
"platform": "esp32-idf",
"use_merged_config": "true"
}
}
The CI workflow uses this information to:
@@ -20,6 +26,7 @@ The CI workflow uses this information to:
- Skip or run Python linters (ruff, flake8, pylint, pyupgrade)
- Determine which components to test individually
- Decide how to split component tests (if there are many)
- Run memory impact analysis when components change (using a merged config) or when only core files change
Usage:
python script/determine-jobs.py [-b BRANCH]
@@ -31,6 +38,8 @@ Options:
from __future__ import annotations
import argparse
from collections import Counter
from enum import StrEnum
from functools import cache
import json
import os
@@ -40,16 +49,47 @@ import sys
from typing import Any
from helpers import (
BASE_BUS_COMPONENTS,
CPP_FILE_EXTENSIONS,
ESPHOME_COMPONENTS_PATH,
PYTHON_FILE_EXTENSIONS,
changed_files,
get_all_dependencies,
get_component_from_path,
get_component_test_files,
get_components_from_integration_fixtures,
parse_test_filename,
root_path,
)
class Platform(StrEnum):
"""Platform identifiers for memory impact analysis."""
ESP8266_ARD = "esp8266-ard"
ESP32_IDF = "esp32-idf"
ESP32_C3_IDF = "esp32-c3-idf"
ESP32_C6_IDF = "esp32-c6-idf"
ESP32_S2_IDF = "esp32-s2-idf"
ESP32_S3_IDF = "esp32-s3-idf"
# Memory impact analysis constants
MEMORY_IMPACT_FALLBACK_COMPONENT = "api" # Representative component for core changes
MEMORY_IMPACT_FALLBACK_PLATFORM = Platform.ESP32_IDF # Most representative platform
# Platform preference order for memory impact analysis
# Prefer newer platforms first as they represent the future of ESPHome
# ESP8266 is most constrained but many new features don't support it
MEMORY_IMPACT_PLATFORM_PREFERENCE = [
Platform.ESP32_C6_IDF, # ESP32-C6 IDF (newest, supports Thread/Zigbee)
Platform.ESP8266_ARD, # ESP8266 Arduino (most memory constrained - best for impact analysis)
Platform.ESP32_IDF, # ESP32 IDF platform (primary ESP32 platform, most representative)
Platform.ESP32_C3_IDF, # ESP32-C3 IDF
Platform.ESP32_S2_IDF, # ESP32-S2 IDF
Platform.ESP32_S3_IDF, # ESP32-S3 IDF
]
def should_run_integration_tests(branch: str | None = None) -> bool:
"""Determine if integration tests should run based on changed files.
@@ -105,12 +145,9 @@ def should_run_integration_tests(branch: str | None = None) -> bool:
# Check if any required components changed
for file in files:
if file.startswith(ESPHOME_COMPONENTS_PATH):
parts = file.split("/")
if len(parts) >= 3:
component = parts[2]
if component in all_required_components:
return True
component = get_component_from_path(file)
if component and component in all_required_components:
return True
return False
@@ -224,10 +261,136 @@ def _component_has_tests(component: str) -> bool:
Returns:
True if the component has test YAML files
"""
tests_dir = Path(root_path) / "tests" / "components" / component
if not tests_dir.exists():
return False
return any(tests_dir.glob("test.*.yaml"))
return bool(get_component_test_files(component))
def detect_memory_impact_config(
branch: str | None = None,
) -> dict[str, Any]:
"""Determine memory impact analysis configuration.
Always runs memory impact analysis when there are changed components,
building a merged configuration with all changed components (like
test_build_components.py does) to get comprehensive memory analysis.
Args:
branch: Branch to compare against
Returns:
Dictionary with memory impact analysis parameters:
- should_run: "true" or "false"
- components: list of component names to analyze
- platform: platform name for the merged build
- use_merged_config: "true" (always use merged config)
"""
# Get actually changed files (not dependencies)
files = changed_files(branch)
# Find all changed components (excluding core and base bus components)
changed_component_set: set[str] = set()
has_core_changes = False
for file in files:
component = get_component_from_path(file)
if component:
# Skip base bus components as they're used across many builds
if component not in BASE_BUS_COMPONENTS:
changed_component_set.add(component)
elif file.startswith("esphome/"):
# Core ESPHome files changed (not component-specific)
has_core_changes = True
# If no components changed but core changed, test representative component
force_fallback_platform = False
if not changed_component_set and has_core_changes:
print(
f"Memory impact: No components changed, but core files changed. "
f"Testing {MEMORY_IMPACT_FALLBACK_COMPONENT} component on {MEMORY_IMPACT_FALLBACK_PLATFORM}.",
file=sys.stderr,
)
changed_component_set.add(MEMORY_IMPACT_FALLBACK_COMPONENT)
force_fallback_platform = True # Use fallback platform (most representative)
elif not changed_component_set:
# No components and no core changes
return {"should_run": "false"}
# Find components that have tests and collect their supported platforms
components_with_tests: list[str] = []
component_platforms_map: dict[
str, set[Platform]
] = {} # Track which platforms each component supports
for component in sorted(changed_component_set):
# Look for test files on preferred platforms
test_files = get_component_test_files(component)
if not test_files:
continue
# Check if component has tests for any preferred platform
available_platforms = [
platform
for test_file in test_files
if (platform := parse_test_filename(test_file)[1]) != "all"
and platform in MEMORY_IMPACT_PLATFORM_PREFERENCE
]
if not available_platforms:
continue
component_platforms_map[component] = set(available_platforms)
components_with_tests.append(component)
# If no components have tests, don't run memory impact
if not components_with_tests:
return {"should_run": "false"}
# Find common platforms supported by ALL components
# This ensures we can build all components together in a merged config
common_platforms = set(MEMORY_IMPACT_PLATFORM_PREFERENCE)
for component, platforms in component_platforms_map.items():
common_platforms &= platforms
# Select the most preferred platform from the common set
# Exception: for core changes, use fallback platform (most representative of codebase)
if force_fallback_platform:
platform = MEMORY_IMPACT_FALLBACK_PLATFORM
elif common_platforms:
# Pick the most preferred platform that all components support
platform = min(common_platforms, key=MEMORY_IMPACT_PLATFORM_PREFERENCE.index)
else:
# No common platform - pick the most commonly supported platform
# This allows testing components individually even if they can't be merged
# Count how many components support each platform
platform_counts = Counter(
p for platforms in component_platforms_map.values() for p in platforms
)
# Pick the platform supported by most components, preferring earlier in MEMORY_IMPACT_PLATFORM_PREFERENCE
platform = max(
platform_counts.keys(),
key=lambda p: (
platform_counts[p],
-MEMORY_IMPACT_PLATFORM_PREFERENCE.index(p),
),
)
# Debug output
print("Memory impact analysis:", file=sys.stderr)
print(f" Changed components: {sorted(changed_component_set)}", file=sys.stderr)
print(f" Components with tests: {components_with_tests}", file=sys.stderr)
print(
f" Component platforms: {dict(sorted(component_platforms_map.items()))}",
file=sys.stderr,
)
print(f" Common platforms: {sorted(common_platforms)}", file=sys.stderr)
print(f" Selected platform: {platform}", file=sys.stderr)
return {
"should_run": "true",
"components": components_with_tests,
"platform": platform,
"use_merged_config": "true",
}
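A worked example of the selection rules above; component names and platform sets are hypothetical, and Platform / MEMORY_IMPACT_PLATFORM_PREFERENCE are assumed in scope (the test suite loads script/determine-jobs.py via importlib):

from collections import Counter

# Two hypothetical components: wifi supports {esp32-idf, esp8266-ard, esp32-c3-idf},
# api supports {esp32-idf, esp32-c3-idf}. Their intersection:
common = {Platform.ESP32_IDF, Platform.ESP32_C3_IDF}
# esp32-idf sits earlier in the preference list, so it is selected
assert min(common, key=MEMORY_IMPACT_PLATFORM_PREFERENCE.index) == Platform.ESP32_IDF

# No common platform: the most-supported platform wins, ties broken by preference order
counts = Counter({Platform.ESP32_C3_IDF: 2, Platform.ESP32_S3_IDF: 2})
pick = max(counts, key=lambda p: (counts[p], -MEMORY_IMPACT_PLATFORM_PREFERENCE.index(p)))
assert pick == Platform.ESP32_C3_IDF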
def main() -> None:
@@ -279,6 +442,9 @@ def main() -> None:
if component not in directly_changed_components
]
# Detect components for memory impact analysis (merged config)
memory_impact = detect_memory_impact_config(args.branch)
# Build output
output: dict[str, Any] = {
"integration_tests": run_integration,
@@ -292,6 +458,7 @@ def main() -> None:
"component_test_count": len(changed_components_with_tests),
"directly_changed_count": len(directly_changed_with_tests),
"dependency_only_count": len(dependency_only_components),
"memory_impact": memory_impact,
}
# Output as JSON


@@ -29,6 +29,18 @@ YAML_FILE_EXTENSIONS = (".yaml", ".yml")
# Component path prefix
ESPHOME_COMPONENTS_PATH = "esphome/components/"
# Base bus components - these ARE the bus implementations and should not
# be flagged as needing migration since they are the platform/base components
BASE_BUS_COMPONENTS = {
"i2c",
"spi",
"uart",
"modbus",
"canbus",
"remote_transmitter",
"remote_receiver",
}
def parse_list_components_output(output: str) -> list[str]:
"""Parse the output from list-components.py script.
@@ -46,6 +58,65 @@ def parse_list_components_output(output: str) -> list[str]:
return [c.strip() for c in output.strip().split("\n") if c.strip()]
def parse_test_filename(test_file: Path) -> tuple[str, str]:
"""Parse test filename to extract test name and platform.
Test files follow the naming pattern: test.<platform>.yaml or test-<variant>.<platform>.yaml
Args:
test_file: Path to test file
Returns:
Tuple of (test_name, platform)
"""
parts = test_file.stem.split(".")
if len(parts) == 2:
return parts[0], parts[1] # test, platform
return parts[0], "all"
def get_component_from_path(file_path: str) -> str | None:
"""Extract component name from a file path.
Args:
file_path: Path to a file (e.g., "esphome/components/wifi/wifi.cpp")
Returns:
Component name if path is in components directory, None otherwise
"""
if not file_path.startswith(ESPHOME_COMPONENTS_PATH):
return None
parts = file_path.split("/")
if len(parts) >= 3:
return parts[2]
return None
def get_component_test_files(
component: str, *, all_variants: bool = False
) -> list[Path]:
"""Get test files for a component.
Args:
component: Component name (e.g., "wifi")
all_variants: If True, returns all test files including variants (test-*.yaml).
If False, returns only base test files (test.*.yaml).
Default is False.
Returns:
List of test file paths for the component, or empty list if none exist
"""
tests_dir = Path(root_path) / "tests" / "components" / component
if not tests_dir.exists():
return []
if all_variants:
# Match both test.*.yaml and test-*.yaml patterns
return list(tests_dir.glob("test[.-]*.yaml"))
# Match only test.*.yaml (base tests)
return list(tests_dir.glob("test.*.yaml"))
def styled(color: str | tuple[str, ...], msg: str, reset: bool = True) -> str:
prefix = "".join(color) if isinstance(color, tuple) else color
suffix = colorama.Style.RESET_ALL if reset else ""
@@ -314,11 +385,9 @@ def _filter_changed_ci(files: list[str]) -> list[str]:
# because changes in one file can affect other files in the same component.
filtered_files = []
for f in files:
if f.startswith(ESPHOME_COMPONENTS_PATH):
# Check if file belongs to any of the changed components
parts = f.split("/")
if len(parts) >= 3 and parts[2] in component_set:
filtered_files.append(f)
component = get_component_from_path(f)
if component and component in component_set:
filtered_files.append(f)
return filtered_files


@@ -4,7 +4,7 @@ from collections.abc import Callable
from pathlib import Path
import sys
from helpers import changed_files, git_ls_files
from helpers import changed_files, get_component_from_path, git_ls_files
from esphome.const import (
KEY_CORE,
@@ -30,11 +30,9 @@ def get_all_component_files() -> list[str]:
def extract_component_names_array_from_files_array(files):
components = []
for file in files:
file_parts = file.split("/")
if len(file_parts) >= 4:
component_name = file_parts[2]
if component_name not in components:
components.append(component_name)
component_name = get_component_from_path(file)
if component_name and component_name not in components:
components.append(component_name)
return components


@@ -28,6 +28,7 @@ from script.analyze_component_buses import (
create_grouping_signature,
merge_compatible_bus_groups,
)
from script.helpers import get_component_test_files
# Weighting for batch creation
# Isolated components can't be grouped/merged, so they count as 10x
@@ -45,17 +46,12 @@ def has_test_files(component_name: str, tests_dir: Path) -> bool:
Args:
component_name: Name of the component
tests_dir: Path to tests/components directory
tests_dir: Path to tests/components directory (unused, kept for compatibility)
Returns:
True if the component has test.*.yaml files
"""
component_dir = tests_dir / component_name
if not component_dir.exists() or not component_dir.is_dir():
return False
# Check for test.*.yaml files
return any(component_dir.glob("test.*.yaml"))
return bool(get_component_test_files(component_name))
def create_intelligent_batches(


@@ -0,0 +1,27 @@
{{ comment_marker }}
## Memory Impact Analysis
**Components:** {{ components_str }}
**Platform:** `{{ platform }}`
| Metric | Target Branch | This PR | Change |
|--------|--------------|---------|--------|
| **RAM** | {{ target_ram }} | {{ pr_ram }} | {{ ram_change }} |
| **Flash** | {{ target_flash }} | {{ pr_flash }} | {{ flash_change }} |
{% if component_breakdown %}
{{ component_breakdown }}
{% endif %}
{% if symbol_changes %}
{{ symbol_changes }}
{% endif %}
{%- if target_cache_hit %}
> ⚡ Target branch analysis was loaded from cache (build skipped for faster CI).
{%- endif %}
---
> **Note:** This analysis measures **static RAM and Flash usage** only (compile-time allocation).
> **Dynamic memory (heap)** cannot be measured automatically.
> **⚠️ You must test this PR on a real device** to measure free heap and ensure no runtime memory issues.
*This analysis runs automatically when components change. Memory usage is measured from {{ config_note }}.*


@@ -0,0 +1,15 @@
<details open>
<summary>📊 Component Memory Breakdown</summary>
| Component | Target Flash | PR Flash | Change |
|-----------|--------------|----------|--------|
{% for comp, target_flash, pr_flash, delta in changed_components[:max_rows] -%}
{% set threshold = component_change_threshold if comp.startswith("[esphome]") else none -%}
| `{{ comp }}` | {{ target_flash|format_bytes }} | {{ pr_flash|format_bytes }} | {{ format_change(target_flash, pr_flash, threshold=threshold) }} |
{% endfor -%}
{% if changed_components|length > max_rows -%}
| ... | ... | ... | *({{ changed_components|length - max_rows }} more components not shown)* |
{% endif -%}
</details>


@@ -0,0 +1,8 @@
{#- Macro for formatting symbol names in tables -#}
{%- macro format_symbol(symbol, max_length, truncate_length) -%}
{%- if symbol|length <= max_length -%}
`{{ symbol }}`
{%- else -%}
<details><summary><code>{{ symbol[:truncate_length] }}...</code></summary><code>{{ symbol }}</code></details>
{%- endif -%}
{%- endmacro -%}


@@ -0,0 +1,51 @@
{%- from 'ci_memory_impact_macros.j2' import format_symbol -%}
<details>
<summary>🔍 Symbol-Level Changes (click to expand)</summary>
{% if changed_symbols %}
### Changed Symbols
| Symbol | Target Size | PR Size | Change |
|--------|-------------|---------|--------|
{% for symbol, target_size, pr_size, delta in changed_symbols[:max_changed_rows] -%}
| {{ format_symbol(symbol, symbol_max_length, symbol_truncate_length) }} | {{ target_size|format_bytes }} | {{ pr_size|format_bytes }} | {{ format_change(target_size, pr_size) }} |
{% endfor -%}
{% if changed_symbols|length > max_changed_rows -%}
| ... | ... | ... | *({{ changed_symbols|length - max_changed_rows }} more changed symbols not shown)* |
{% endif -%}
{% endif %}
{% if new_symbols %}
### New Symbols (top {{ max_new_rows }})
| Symbol | Size |
|--------|------|
{% for symbol, size in new_symbols[:max_new_rows] -%}
| {{ format_symbol(symbol, symbol_max_length, symbol_truncate_length) }} | {{ size|format_bytes }} |
{% endfor -%}
{% if new_symbols|length > max_new_rows -%}
{% set total_new_size = new_symbols|sum(attribute=1) -%}
| *{{ new_symbols|length - max_new_rows }} more new symbols...* | *Total: {{ total_new_size|format_bytes }}* |
{% endif -%}
{% endif %}
{% if removed_symbols %}
### Removed Symbols (top {{ max_removed_rows }})
| Symbol | Size |
|--------|------|
{% for symbol, size in removed_symbols[:max_removed_rows] -%}
| {{ format_symbol(symbol, symbol_max_length, symbol_truncate_length) }} | {{ size|format_bytes }} |
{% endfor -%}
{% if removed_symbols|length > max_removed_rows -%}
{% set total_removed_size = removed_symbols|sum(attribute=1) -%}
| *{{ removed_symbols|length - max_removed_rows }} more removed symbols...* | *Total: {{ total_removed_size|format_bytes }}* |
{% endif -%}
{% endif %}
</details>


@@ -39,6 +39,7 @@ from script.analyze_component_buses import (
merge_compatible_bus_groups,
uses_local_file_references,
)
from script.helpers import get_component_test_files
from script.merge_component_configs import merge_component_configs
@@ -82,13 +83,14 @@ def show_disk_space_if_ci(esphome_command: str) -> None:
def find_component_tests(
components_dir: Path, component_pattern: str = "*"
components_dir: Path, component_pattern: str = "*", base_only: bool = False
) -> dict[str, list[Path]]:
"""Find all component test files.
Args:
components_dir: Path to tests/components directory
component_pattern: Glob pattern for component names
base_only: If True, only find base test files (test.*.yaml), not variant files (test-*.yaml)
Returns:
Dictionary mapping component name to list of test files
@@ -99,9 +101,10 @@ def find_component_tests(
if not comp_dir.is_dir():
continue
# Find test files matching test.*.yaml or test-*.yaml patterns
for test_file in comp_dir.glob("test[.-]*.yaml"):
component_tests[comp_dir.name].append(test_file)
# Get test files using helper function
test_files = get_component_test_files(comp_dir.name, all_variants=not base_only)
if test_files:
component_tests[comp_dir.name] = test_files
return dict(component_tests)
@@ -931,6 +934,7 @@ def test_components(
continue_on_fail: bool,
enable_grouping: bool = True,
isolated_components: set[str] | None = None,
base_only: bool = False,
) -> int:
"""Test components with optional intelligent grouping.
@@ -944,6 +948,7 @@ def test_components(
These are tested WITHOUT --testing-mode to enable full validation
(pin conflicts, etc). This is used in CI for directly changed components
to catch issues that would be missed with --testing-mode.
base_only: If True, only test base test files (test.*.yaml), not variant files (test-*.yaml)
Returns:
Exit code (0 for success, 1 for failure)
@@ -961,7 +966,7 @@ def test_components(
# Find all component tests
all_tests = {}
for pattern in component_patterns:
all_tests.update(find_component_tests(tests_dir, pattern))
all_tests.update(find_component_tests(tests_dir, pattern, base_only))
if not all_tests:
print(f"No components found matching: {component_patterns}")
@@ -1122,6 +1127,11 @@ def main() -> int:
"These are tested WITHOUT --testing-mode to enable full validation. "
"Used in CI for directly changed components to catch pin conflicts and other issues.",
)
parser.add_argument(
"--base-only",
action="store_true",
help="Only test base test files (test.*.yaml), not variant files (test-*.yaml)",
)
args = parser.parse_args()
@@ -1140,6 +1150,7 @@ def main() -> int:
continue_on_fail=args.continue_on_fail,
enable_grouping=not args.no_grouping,
isolated_components=isolated_components,
base_only=args.base_only,
)

View File

@@ -17,6 +17,9 @@ script_dir = os.path.abspath(
)
sys.path.insert(0, script_dir)
# Import helpers module for patching
import helpers # noqa: E402
spec = importlib.util.spec_from_file_location(
"determine_jobs", os.path.join(script_dir, "determine-jobs.py")
)
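`determine-jobs.py` has a hyphen in its filename, so it cannot be imported with a plain `import` statement; the `spec_from_file_location` call above is the first half of the standard stdlib idiom, which in full looks like this:

```python
# Standard stdlib idiom for importing a module whose filename is not a
# valid identifier (here because of the hyphen in determine-jobs.py).
import importlib.util
import os

spec = importlib.util.spec_from_file_location(
    "determine_jobs", os.path.join("script", "determine-jobs.py")
)
determine_jobs = importlib.util.module_from_spec(spec)
spec.loader.exec_module(determine_jobs)  # executes the module body once
```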
@@ -59,15 +62,29 @@ def mock_subprocess_run() -> Generator[Mock, None, None]:
yield mock
@pytest.fixture
def mock_changed_files() -> Generator[Mock, None, None]:
"""Mock changed_files for memory impact detection."""
with patch.object(determine_jobs, "changed_files") as mock:
# Default to empty list
mock.return_value = []
yield mock
def test_main_all_tests_should_run(
mock_should_run_integration_tests: Mock,
mock_should_run_clang_tidy: Mock,
mock_should_run_clang_format: Mock,
mock_should_run_python_linters: Mock,
mock_subprocess_run: Mock,
mock_changed_files: Mock,
capsys: pytest.CaptureFixture[str],
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test when all tests should run."""
# Ensure we're not in GITHUB_ACTIONS mode for this test
monkeypatch.delenv("GITHUB_ACTIONS", raising=False)
mock_should_run_integration_tests.return_value = True
mock_should_run_clang_tidy.return_value = True
mock_should_run_clang_format.return_value = True
@@ -100,6 +117,9 @@ def test_main_all_tests_should_run(
assert output["component_test_count"] == len(
output["changed_components_with_tests"]
)
# memory_impact should be present
assert "memory_impact" in output
assert output["memory_impact"]["should_run"] == "false" # No files changed
def test_main_no_tests_should_run(
@@ -108,9 +128,14 @@ def test_main_no_tests_should_run(
mock_should_run_clang_format: Mock,
mock_should_run_python_linters: Mock,
mock_subprocess_run: Mock,
mock_changed_files: Mock,
capsys: pytest.CaptureFixture[str],
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test when no tests should run."""
# Ensure we're not in GITHUB_ACTIONS mode for this test
monkeypatch.delenv("GITHUB_ACTIONS", raising=False)
mock_should_run_integration_tests.return_value = False
mock_should_run_clang_tidy.return_value = False
mock_should_run_clang_format.return_value = False
@@ -136,6 +161,9 @@ def test_main_no_tests_should_run(
assert output["changed_components"] == []
assert output["changed_components_with_tests"] == []
assert output["component_test_count"] == 0
# memory_impact should be present
assert "memory_impact" in output
assert output["memory_impact"]["should_run"] == "false"
def test_main_list_components_fails(
@@ -169,9 +197,14 @@ def test_main_with_branch_argument(
mock_should_run_clang_format: Mock,
mock_should_run_python_linters: Mock,
mock_subprocess_run: Mock,
mock_changed_files: Mock,
capsys: pytest.CaptureFixture[str],
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test with branch argument."""
# Ensure we're not in GITHUB_ACTIONS mode for this test
monkeypatch.delenv("GITHUB_ACTIONS", raising=False)
mock_should_run_integration_tests.return_value = False
mock_should_run_clang_tidy.return_value = True
mock_should_run_clang_format.return_value = False
@@ -216,6 +249,9 @@ def test_main_with_branch_argument(
assert output["component_test_count"] == len(
output["changed_components_with_tests"]
)
# memory_impact should be present
assert "memory_impact" in output
assert output["memory_impact"]["should_run"] == "false"
def test_should_run_integration_tests(
@@ -403,10 +439,15 @@ def test_main_filters_components_without_tests(
mock_should_run_clang_format: Mock,
mock_should_run_python_linters: Mock,
mock_subprocess_run: Mock,
mock_changed_files: Mock,
capsys: pytest.CaptureFixture[str],
tmp_path: Path,
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test that components without test files are filtered out."""
# Ensure we're not in GITHUB_ACTIONS mode for this test
monkeypatch.delenv("GITHUB_ACTIONS", raising=False)
mock_should_run_integration_tests.return_value = False
mock_should_run_clang_tidy.return_value = False
mock_should_run_clang_format.return_value = False
@@ -440,9 +481,10 @@ def test_main_filters_components_without_tests(
airthings_dir = tests_dir / "airthings_ble"
airthings_dir.mkdir(parents=True)
# Mock root_path to use tmp_path
# Mock root_path to use tmp_path (need to patch both determine_jobs and helpers)
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch("sys.argv", ["determine-jobs.py"]),
):
# Clear the cache since we're mocking root_path
@@ -459,3 +501,188 @@ def test_main_filters_components_without_tests(
assert set(output["changed_components_with_tests"]) == {"wifi", "sensor"}
# component_test_count should be based on components with tests
assert output["component_test_count"] == 2
# memory_impact should be present
assert "memory_impact" in output
assert output["memory_impact"]["should_run"] == "false"
# Tests for detect_memory_impact_config function
def test_detect_memory_impact_config_with_common_platform(tmp_path: Path) -> None:
"""Test memory impact detection when components share a common platform."""
# Create test directory structure
tests_dir = tmp_path / "tests" / "components"
# wifi component with esp32-idf test
wifi_dir = tests_dir / "wifi"
wifi_dir.mkdir(parents=True)
(wifi_dir / "test.esp32-idf.yaml").write_text("test: wifi")
# api component with esp32-idf test
api_dir = tests_dir / "api"
api_dir.mkdir(parents=True)
(api_dir / "test.esp32-idf.yaml").write_text("test: api")
# Mock changed_files to return wifi and api component changes
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = [
"esphome/components/wifi/wifi.cpp",
"esphome/components/api/api.cpp",
]
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
assert result["should_run"] == "true"
assert set(result["components"]) == {"wifi", "api"}
assert result["platform"] == "esp32-idf" # Common platform
assert result["use_merged_config"] == "true"
def test_detect_memory_impact_config_core_only_changes(tmp_path: Path) -> None:
"""Test memory impact detection with core-only changes (no component changes)."""
# Create test directory structure with fallback component
tests_dir = tmp_path / "tests" / "components"
# api component (fallback component) with esp32-idf test
api_dir = tests_dir / "api"
api_dir.mkdir(parents=True)
(api_dir / "test.esp32-idf.yaml").write_text("test: api")
# Mock changed_files to return only core files (no component files)
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = [
"esphome/core/application.cpp",
"esphome/core/component.h",
]
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
assert result["should_run"] == "true"
assert result["components"] == ["api"] # Fallback component
assert result["platform"] == "esp32-idf" # Fallback platform
assert result["use_merged_config"] == "true"
def test_detect_memory_impact_config_no_common_platform(tmp_path: Path) -> None:
"""Test memory impact detection when components have no common platform."""
# Create test directory structure
tests_dir = tmp_path / "tests" / "components"
# wifi component only has esp32-idf test
wifi_dir = tests_dir / "wifi"
wifi_dir.mkdir(parents=True)
(wifi_dir / "test.esp32-idf.yaml").write_text("test: wifi")
# logger component only has esp8266-ard test
logger_dir = tests_dir / "logger"
logger_dir.mkdir(parents=True)
(logger_dir / "test.esp8266-ard.yaml").write_text("test: logger")
# Mock changed_files to return both components
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = [
"esphome/components/wifi/wifi.cpp",
"esphome/components/logger/logger.cpp",
]
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
# Should pick the most frequently supported platform
assert result["should_run"] == "true"
assert set(result["components"]) == {"wifi", "logger"}
# When there is no common platform, the most commonly supported one is
# picked, so either candidate platform is acceptable here
assert result["platform"] in ["esp32-idf", "esp8266-ard"]
assert result["use_merged_config"] == "true"
def test_detect_memory_impact_config_no_changes(tmp_path: Path) -> None:
"""Test memory impact detection when no files changed."""
# Mock changed_files to return empty list
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = []
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
assert result["should_run"] == "false"
def test_detect_memory_impact_config_no_components_with_tests(tmp_path: Path) -> None:
"""Test memory impact detection when changed components have no tests."""
# Create test directory structure
tests_dir = tmp_path / "tests" / "components"
# Create component directory but no test files
custom_component_dir = tests_dir / "my_custom_component"
custom_component_dir.mkdir(parents=True)
# Mock changed_files to return component without tests
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = [
"esphome/components/my_custom_component/component.cpp",
]
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
assert result["should_run"] == "false"
def test_detect_memory_impact_config_skips_base_bus_components(tmp_path: Path) -> None:
"""Test that base bus components (i2c, spi, uart) are skipped."""
# Create test directory structure
tests_dir = tmp_path / "tests" / "components"
# i2c component (should be skipped as it's a base bus component)
i2c_dir = tests_dir / "i2c"
i2c_dir.mkdir(parents=True)
(i2c_dir / "test.esp32-idf.yaml").write_text("test: i2c")
# wifi component (should not be skipped)
wifi_dir = tests_dir / "wifi"
wifi_dir.mkdir(parents=True)
(wifi_dir / "test.esp32-idf.yaml").write_text("test: wifi")
# Mock changed_files to return both i2c and wifi
with (
patch.object(determine_jobs, "root_path", str(tmp_path)),
patch.object(helpers, "root_path", str(tmp_path)),
patch.object(determine_jobs, "changed_files") as mock_changed_files,
):
mock_changed_files.return_value = [
"esphome/components/i2c/i2c.cpp",
"esphome/components/wifi/wifi.cpp",
]
determine_jobs._component_has_tests.cache_clear()
result = determine_jobs.detect_memory_impact_config()
# Should only include wifi, not i2c
assert result["should_run"] == "true"
assert result["components"] == ["wifi"]
assert "i2c" not in result["components"]

View File

@@ -387,6 +387,42 @@ def test_idedata_addr2line_path_unix(setup_core: Path) -> None:
assert result == "/usr/bin/addr2line"
def test_idedata_objdump_path_windows(setup_core: Path) -> None:
"""Test IDEData.objdump_path on Windows."""
raw_data = {"prog_path": "/path/to/firmware.elf", "cc_path": "C:\\tools\\gcc.exe"}
idedata = platformio_api.IDEData(raw_data)
result = idedata.objdump_path
assert result == "C:\\tools\\objdump.exe"
def test_idedata_objdump_path_unix(setup_core: Path) -> None:
"""Test IDEData.objdump_path on Unix."""
raw_data = {"prog_path": "/path/to/firmware.elf", "cc_path": "/usr/bin/gcc"}
idedata = platformio_api.IDEData(raw_data)
result = idedata.objdump_path
assert result == "/usr/bin/objdump"
def test_idedata_readelf_path_windows(setup_core: Path) -> None:
"""Test IDEData.readelf_path on Windows."""
raw_data = {"prog_path": "/path/to/firmware.elf", "cc_path": "C:\\tools\\gcc.exe"}
idedata = platformio_api.IDEData(raw_data)
result = idedata.readelf_path
assert result == "C:\\tools\\readelf.exe"
def test_idedata_readelf_path_unix(setup_core: Path) -> None:
"""Test IDEData.readelf_path on Unix."""
raw_data = {"prog_path": "/path/to/firmware.elf", "cc_path": "/usr/bin/gcc"}
idedata = platformio_api.IDEData(raw_data)
result = idedata.readelf_path
assert result == "/usr/bin/readelf"
def test_patch_structhash(setup_core: Path) -> None:
"""Test patch_structhash monkey patches platformio functions."""
# Create simple namespace objects to act as modules