mirror of
https://github.com/lvgl/lvgl.git
synced 2026-02-05 21:42:20 +08:00
Some checks failed
Arduino Lint / lint (push) Has been cancelled
Build Examples with C++ Compiler / build-examples (push) Has been cancelled
MicroPython CI / Build esp32 port (push) Has been cancelled
MicroPython CI / Build rp2 port (push) Has been cancelled
MicroPython CI / Build stm32 port (push) Has been cancelled
MicroPython CI / Build unix port (push) Has been cancelled
C/C++ CI / Build OPTIONS_16BIT - Ubuntu (push) Has been cancelled
C/C++ CI / Build OPTIONS_24BIT - Ubuntu (push) Has been cancelled
C/C++ CI / Build OPTIONS_FULL_32BIT - Ubuntu (push) Has been cancelled
C/C++ CI / Build OPTIONS_NORMAL_8BIT - Ubuntu (push) Has been cancelled
C/C++ CI / Build OPTIONS_SDL - Ubuntu (push) Has been cancelled
C/C++ CI / Build OPTIONS_16BIT - cl - Windows (push) Has been cancelled
C/C++ CI / Build OPTIONS_16BIT - gcc - Windows (push) Has been cancelled
C/C++ CI / Build OPTIONS_24BIT - cl - Windows (push) Has been cancelled
C/C++ CI / Build OPTIONS_24BIT - gcc - Windows (push) Has been cancelled
C/C++ CI / Build OPTIONS_FULL_32BIT - cl - Windows (push) Has been cancelled
C/C++ CI / Build OPTIONS_FULL_32BIT - gcc - Windows (push) Has been cancelled
C/C++ CI / Build ESP IDF ESP32S3 (push) Has been cancelled
C/C++ CI / Run tests with 32bit build (push) Has been cancelled
C/C++ CI / Run tests with 64bit build (push) Has been cancelled
BOM Check / bom-check (push) Has been cancelled
Verify that lv_conf_internal.h matches repository state / verify-conf-internal (push) Has been cancelled
Verify the widget property name / verify-property-name (push) Has been cancelled
Verify code formatting / verify-formatting (push) Has been cancelled
Compare file templates with file names / template-check (push) Has been cancelled
Build docs / build-and-deploy (push) Has been cancelled
Test API JSON generator / Test API JSON (push) Has been cancelled
Install LVGL using CMake / build-examples (push) Has been cancelled
Check Makefile / Build using Makefile (push) Has been cancelled
Check Makefile for UEFI / Build using Makefile for UEFI (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark - Script Check (scripts/perf/tests/benchmark_results_comment/test.sh) (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark - Script Check (scripts/perf/tests/filter_docker_logs/test.sh) (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark - Script Check (scripts/perf/tests/serialize_results/test.sh) (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark 32b - lv_conf_perf32b (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark 64b - lv_conf_perf64b (push) Has been cancelled
Emulated Performance Test / ARM Emulated Benchmark - Save PR Number (push) Has been cancelled
Hardware Performance Test / Hardware Performance Benchmark (push) Has been cancelled
Hardware Performance Test / HW Benchmark - Save PR Number (push) Has been cancelled
Performance Tests CI / Perf Tests OPTIONS_TEST_PERF_32B - Ubuntu (push) Has been cancelled
Performance Tests CI / Perf Tests OPTIONS_TEST_PERF_64B - Ubuntu (push) Has been cancelled
Port repo release update / run-release-branch-updater (push) Has been cancelled
Verify Font License / verify-font-license (push) Has been cancelled
Verify Kconfig / verify-kconfig (push) Has been cancelled
Signed-off-by: dependabot[bot] <support@github.com> Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
171 lines
6.1 KiB
YAML
171 lines
6.1 KiB
YAML
---
# Companion workflow to "Emulated Performance Test": runs after that workflow
# completes (workflow_run trigger) so it can safely use write permissions to
# comment on PRs and publish benchmark results, even for fork-originated runs.
name: Emulated Performance Test Results Handler

on:
  workflow_run:
    workflows: [Emulated Performance Test]
    types:
      - completed

# One in-flight run per (event, branch, workflow); newer runs cancel older ones.
concurrency:
  group: ${{ github.event.workflow_run.event }}-${{ github.event.workflow_run.head_branch }}-${{ github.workflow }}
  cancel-in-progress: true

permissions:
  contents: write        # needed to update the benchmark pre-release
  pull-requests: write   # needed to create/update the PR comment

jobs:
  # Self-test the helper scripts before trusting their output downstream.
  check_scripts:
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: ubuntu-24.04
    name: ARM Emulated Benchmark - Script Check
    strategy:
      fail-fast: false
      matrix:
        test_script:
          - scripts/perf/tests/benchmark_results_comment/test.sh
          - scripts/perf/tests/serialize_results/test.sh
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          fetch-depth: 0

      - name: Check Script
        run: |
          pip3 install msgpack==1.1.0
          ./${{ matrix.test_script }}

  # Download the benchmark artifacts produced by the triggering run, then
  # either comment the comparison on the PR (pull_request) or serialize and
  # store the results in the rolling pre-release (push to master).
  handle_results:
    needs: check_scripts
    if: ${{ github.event.workflow_run.conclusion == 'success' }}
    runs-on: ubuntu-24.04
    name: ARM Emulated Benchmark - Handle Results
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          fetch-depth: 0

      - name: Download Results from master
        if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
        uses: dawidd6/action-download-artifact@v14
        with:
          workflow: perf_emulation.yml
          path: artifacts
          allow_forks: false

      - name: Download Results from PR
        if: ${{ github.event.workflow_run.event == 'pull_request' }}
        uses: dawidd6/action-download-artifact@v14
        with:
          workflow: perf_emulation.yml
          path: artifacts
          # The artifact needs to be downloaded from a PR run that comes from a forked repository
          allow_forks: true

      - name: Move JSON files to a single folder
        run: |
          mkdir input
          find artifacts -name "*.json" -exec mv {} input/ \;

      - name: Collect 'master' Results
        uses: robinraju/release-downloader@v1
        continue-on-error: true  # The release may not exist yet
        with:
          preRelease: true
          tag: emulated-benchmark-latest
          fileName: results*.mpk

      - name: Move PR data files to current folder
        if: ${{ github.event.workflow_run.event == 'pull_request' }}
        run: |
          mv artifacts/pr_number/pr_number .

      - name: Prepare Comment
        if: ${{ github.event.workflow_run.event == 'pull_request' }}
        run: |
          pip3 install msgpack==1.1.0
          if ls results*.mpk 1> /dev/null 2>&1; then
            python3 scripts/perf/benchmark_results_comment.py \
              --previous results*.mpk \
              --new input/results*.json \
              --output comment.md
          else
            echo "No previous results found, generating comment without comparison."
            python3 scripts/perf/benchmark_results_comment.py \
              --new input/results*.json \
              --output comment.md
          fi

      - name: Comment PR
        if: ${{ github.event.workflow_run.event == 'pull_request' }}
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
            const commentPath = 'comment.md';
            const prPath = 'pr_number';

            if (!fs.existsSync(commentPath)) {
              throw new Error('Error: comment.md not found! Exiting.');
            }
            if (!fs.existsSync(prPath)) {
              throw new Error('Error: pr_number not found! Exiting.');
            }

            const commentBody = fs.readFileSync(commentPath, 'utf8').trim();
            const prNumber = Number(fs.readFileSync(prPath, 'utf8').trim());

            // Try to find if a comment already exists so we avoid spamming the PR with comments
            const { data: comments } = await github.rest.issues.listComments({
              issue_number: prNumber,
              owner: context.repo.owner,
              repo: context.repo.repo,
            });
            const existingComment = comments.find(comment =>
              comment.body.includes(':robot: This comment was automatically generated by a bot.')
            );

            try {
              // Now we either edit the already existing comment or we generate a new one
              if (existingComment) {
                await github.rest.issues.updateComment({
                  comment_id: existingComment.id,
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  body: commentBody
                });
                console.log(`Updated existing comment (ID: ${existingComment.id})`);
              } else {
                await github.rest.issues.createComment({
                  issue_number: prNumber,
                  owner: context.repo.owner,
                  repo: context.repo.repo,
                  body: commentBody
                });
                console.log('Created new comment');
              }
            } catch (error) {
              console.error("Error:", error.message);
            }

      - name: Serialize Results
        if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
        run: |
          # Here the input folder already exists from a previous step
          pip3 install msgpack==1.1.0
          mkdir output
          find . -maxdepth 1 \( -name "results*.mpk" \) -exec mv -t input {} +
          python scripts/perf/serialize_results.py --input input --output output --commit-hash ${{ github.sha }}

      - name: Store Results in Benchmark Release
        if: ${{ github.event.workflow_run.event == 'push' && github.event.workflow_run.head_branch == 'master' }}
        uses: softprops/action-gh-release@v2
        with:
          name: Emulated Benchmark Latest
          files: output/results*.mpk
          tag_name: emulated-benchmark-latest
          prerelease: true
          body: This pre-release is automatically generated and serves as a repository for benchmark results.